Commit de7007e9 authored by Jens Axboe

Merge tag 'nvme-6.12-2024-10-18' of git://git.infradead.org/nvme into block-6.12

Pull NVMe fixes from Keith:

"nvme fixes for Linux 6.12

 - Fix target passthrough identifier (Nilay)
 - Fix tcp locking (Hannes)
 - Replace list with sbitmap for tracking RDMA rsp tags (Guixin)
 - Remove unnecessary fallthrough statements (Tokunori)
 - Remove ready-without-media support (Greg)
 - Fix multipath partition scan deadlock (Keith)
 - Fix concurrent PCI reset and remove queue mapping (Maurizio)
 - Fabrics shutdown fixes (Nilay)"

* tag 'nvme-6.12-2024-10-18' of git://git.infradead.org/nvme:
  nvme: use helper nvme_ctrl_state in nvme_keep_alive_finish function
  nvme: make keep-alive synchronous operation
  nvme-loop: flush off pending I/O while shutting down loop controller
  nvme-pci: fix race condition between reset and nvme_dev_disable()
  nvme-multipath: defer partition scanning
  nvme: disable CC.CRIME (NVME_CC_CRIME)
  nvme: delete unnecessary fallthru comment
  nvmet-rdma: use sbitmap to replace rsp free list
  nvme: tcp: avoid race between queue_lock lock and destroy
  nvmet-passthru: clear EUID/NGUID/UUID while using loop target
  block: fix blk_rq_map_integrity_sg kernel-doc
parents 42aafd8b 599d9f3a
drivers/nvme/host/core.c

@@ -1292,14 +1292,12 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
         queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
 }

-static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
-                blk_status_t status)
+static void nvme_keep_alive_finish(struct request *rq,
+                blk_status_t status, struct nvme_ctrl *ctrl)
 {
-        struct nvme_ctrl *ctrl = rq->end_io_data;
-        unsigned long flags;
-        bool startka = false;
         unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
         unsigned long delay = nvme_keep_alive_work_period(ctrl);
+        enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);

         /*
          * Subtract off the keepalive RTT so nvme_keep_alive_work runs
@@ -1313,25 +1311,17 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
                 delay = 0;
         }

-        blk_mq_free_request(rq);
-
         if (status) {
                 dev_err(ctrl->device,
                         "failed nvme_keep_alive_end_io error=%d\n",
                         status);
-                return RQ_END_IO_NONE;
+                return;
         }

         ctrl->ka_last_check_time = jiffies;
         ctrl->comp_seen = false;
-        spin_lock_irqsave(&ctrl->lock, flags);
-        if (ctrl->state == NVME_CTRL_LIVE ||
-            ctrl->state == NVME_CTRL_CONNECTING)
-                startka = true;
-        spin_unlock_irqrestore(&ctrl->lock, flags);
-        if (startka)
+        if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING)
                 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
-        return RQ_END_IO_NONE;
 }

 static void nvme_keep_alive_work(struct work_struct *work)
@@ -1340,6 +1330,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
                         struct nvme_ctrl, ka_work);
         bool comp_seen = ctrl->comp_seen;
         struct request *rq;
+        blk_status_t status;

         ctrl->ka_last_check_time = jiffies;

@@ -1362,9 +1353,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
         nvme_init_request(rq, &ctrl->ka_cmd);

         rq->timeout = ctrl->kato * HZ;
-        rq->end_io = nvme_keep_alive_end_io;
-        rq->end_io_data = ctrl;
-        blk_execute_rq_nowait(rq, false);
+        status = blk_execute_rq(rq, false);
+        nvme_keep_alive_finish(rq, status, ctrl);
+        blk_mq_free_request(rq);
 }

 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -2458,8 +2449,13 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
         else
                 ctrl->ctrl_config = NVME_CC_CSS_NVM;

-        if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
-                ctrl->ctrl_config |= NVME_CC_CRIME;
+        /*
+         * Setting CRIME results in CSTS.RDY before the media is ready. This
+         * makes it possible for media related commands to return the error
+         * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is
+         * restructured to handle retries, disable CC.CRIME.
+         */
+        ctrl->ctrl_config &= ~NVME_CC_CRIME;

         ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
         ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
@@ -2489,9 +2485,6 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
          * devices are known to get this wrong. Use the larger of the
          * two values.
          */
-        if (ctrl->ctrl_config & NVME_CC_CRIME)
-                ready_timeout = NVME_CRTO_CRIMT(crto);
-        else
-                ready_timeout = NVME_CRTO_CRWMT(crto);
+        ready_timeout = NVME_CRTO_CRWMT(crto);

         if (ready_timeout < timeout)
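A note on the rtt arithmetic kept by the hunks above: rq->deadline was set to the submission time plus rq->timeout when the keep-alive was queued, so "deadline - timeout" recovers the submission time and "jiffies - (deadline - timeout)" is the command's round-trip time, which is then subtracted from the next delay so the keep-alive cadence stays constant. A standalone sketch of that compensation with invented values (plain userspace C, not driver code):

/*
 * Illustrative only: the numbers are made up and plain unsigned longs
 * stand in for jiffies.  Mirrors the logic in nvme_keep_alive_finish().
 */
#include <stdio.h>

int main(void)
{
        unsigned long start = 1000;                     /* when the keep-alive was issued */
        unsigned long timeout = 500;                    /* rq->timeout */
        unsigned long deadline = start + timeout;       /* rq->deadline */
        unsigned long now = 1120;                       /* "jiffies" at completion */
        unsigned long delay = 250;                      /* keep-alive period */
        unsigned long rtt = now - (deadline - timeout); /* == now - start */

        if (rtt <= delay)
                delay -= rtt;   /* keep the period, don't add the RTT on top */
        else
                delay = 0;      /* long RTT: schedule the next one immediately */

        printf("rtt=%lu, next keep-alive in %lu\n", rtt, delay);
        return 0;
}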
drivers/nvme/host/multipath.c

@@ -431,7 +431,6 @@ static bool nvme_available_path(struct nvme_ns_head *head)
                 case NVME_CTRL_LIVE:
                 case NVME_CTRL_RESETTING:
                 case NVME_CTRL_CONNECTING:
-                        /* fallthru */
                         return true;
                 default:
                         break;
@@ -580,6 +579,20 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
         return ret;
 }

+static void nvme_partition_scan_work(struct work_struct *work)
+{
+        struct nvme_ns_head *head =
+                container_of(work, struct nvme_ns_head, partition_scan_work);
+
+        if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
+                                             &head->disk->state)))
+                return;
+
+        mutex_lock(&head->disk->open_mutex);
+        bdev_disk_changed(head->disk, false);
+        mutex_unlock(&head->disk->open_mutex);
+}
+
 static void nvme_requeue_work(struct work_struct *work)
 {
         struct nvme_ns_head *head =
@@ -606,6 +619,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
         bio_list_init(&head->requeue_list);
         spin_lock_init(&head->requeue_lock);
         INIT_WORK(&head->requeue_work, nvme_requeue_work);
+        INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work);

         /*
          * Add a multipath node if the subsystems supports multiple controllers.
@@ -629,6 +643,16 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
                 return PTR_ERR(head->disk);
         head->disk->fops = &nvme_ns_head_ops;
         head->disk->private_data = head;
+
+        /*
+         * We need to suppress the partition scan from occuring within the
+         * controller's scan_work context. If a path error occurs here, the IO
+         * will wait until a path becomes available or all paths are torn down,
+         * but that action also occurs within scan_work, so it would deadlock.
+         * Defer the partion scan to a different context that does not block
+         * scan_work.
+         */
+        set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);
         sprintf(head->disk->disk_name, "nvme%dn%d",
                         ctrl->subsys->instance, head->instance);
         return 0;
@@ -655,6 +679,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
                         return;
                 }
                 nvme_add_ns_head_cdev(head);
+                kblockd_schedule_work(&head->partition_scan_work);
         }

         mutex_lock(&head->lock);
@@ -974,14 +999,14 @@ void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
                 return;
         if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
                 nvme_cdev_del(&head->cdev, &head->cdev_device);
-                del_gendisk(head->disk);
-        }
-        /*
-         * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
-         * to allow multipath to fail all I/O.
-         */
-        synchronize_srcu(&head->srcu);
-        kblockd_schedule_work(&head->requeue_work);
+                /*
+                 * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
+                 * to allow multipath to fail all I/O.
+                 */
+                synchronize_srcu(&head->srcu);
+                kblockd_schedule_work(&head->requeue_work);
+                del_gendisk(head->disk);
+        }
 }

 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -991,6 +1016,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
         /* make sure all pending bios are cleaned up */
         kblockd_schedule_work(&head->requeue_work);
         flush_work(&head->requeue_work);
+        flush_work(&head->partition_scan_work);
         put_disk(head->disk);
 }
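The comment added in nvme_mpath_alloc_disk() describes a wait-on-itself deadlock: scan_work would issue partition-scan I/O that cannot complete until scan_work itself brings a path up (or tears all paths down), so the scan is suppressed there and deferred to its own work item. A minimal userspace sketch of why handing the scan to a second worker breaks that cycle (pthreads stand in for kernel workqueues; all names here are illustrative, not driver APIs):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t paths_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t path_ready = PTHREAD_COND_INITIALIZER;
static int have_path;

/* Deferred "partition scan": its I/O blocks until a path exists. */
static void *partition_scan(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&paths_lock);
        while (!have_path)
                pthread_cond_wait(&path_ready, &paths_lock);
        pthread_mutex_unlock(&paths_lock);
        printf("partition scan completed\n");
        return NULL;
}

int main(void)
{
        pthread_t scan;

        /*
         * "scan_work": defer the partition scan instead of running it inline.
         * Calling partition_scan() directly here would block before the path
         * below ever comes up, which is the deadlock shape the patch avoids.
         */
        pthread_create(&scan, NULL, partition_scan, NULL);

        /* scan_work continues and eventually makes a path available. */
        pthread_mutex_lock(&paths_lock);
        have_path = 1;
        pthread_cond_signal(&path_ready);
        pthread_mutex_unlock(&paths_lock);

        pthread_join(scan, NULL);
        return 0;
}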
drivers/nvme/host/nvme.h

@@ -494,6 +494,7 @@ struct nvme_ns_head {
         struct bio_list         requeue_list;
         spinlock_t              requeue_lock;
         struct work_struct      requeue_work;
+        struct work_struct      partition_scan_work;
         struct mutex            lock;
         unsigned long           flags;
 #define NVME_NSHEAD_DISK_LIVE   0
drivers/nvme/host/pci.c

@@ -2506,17 +2506,29 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
         return 1;
 }

-static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
+static bool nvme_pci_update_nr_queues(struct nvme_dev *dev)
 {
         if (!dev->ctrl.tagset) {
                 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
                                 nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
-                return;
+                return true;
+        }
+
+        /* Give up if we are racing with nvme_dev_disable() */
+        if (!mutex_trylock(&dev->shutdown_lock))
+                return false;
+
+        /* Check if nvme_dev_disable() has been executed already */
+        if (!dev->online_queues) {
+                mutex_unlock(&dev->shutdown_lock);
+                return false;
         }

         blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
         /* free previously allocated queues that are no longer usable */
         nvme_free_queues(dev, dev->online_queues);
+        mutex_unlock(&dev->shutdown_lock);
+        return true;
 }

 static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2797,7 +2809,8 @@ static void nvme_reset_work(struct work_struct *work)
                 nvme_dbbuf_set(dev);
                 nvme_unquiesce_io_queues(&dev->ctrl);
                 nvme_wait_freeze(&dev->ctrl);
-                nvme_pci_update_nr_queues(dev);
+                if (!nvme_pci_update_nr_queues(dev))
+                        goto out;
                 nvme_unfreeze(&dev->ctrl);
         } else {
                 dev_warn(dev->ctrl.device, "IO queues lost\n");
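The reworked nvme_pci_update_nr_queues() leans on a try-lock so the reset path backs off instead of blocking on, or racing with, nvme_dev_disable(), and on re-checking online_queues once the lock is held in case disable already ran. A hedged userspace analogue of that pattern (a pthread mutex stands in for dev->shutdown_lock; the names only mirror the patch):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t shutdown_lock = PTHREAD_MUTEX_INITIALIZER;
static int online_queues = 4;   /* zeroed by the concurrent disable path */

static bool update_nr_queues(void)
{
        /* Give up if a concurrent disable currently holds the lock. */
        if (pthread_mutex_trylock(&shutdown_lock) != 0)
                return false;

        /* Disable may already have run and torn the queues down. */
        if (online_queues == 0) {
                pthread_mutex_unlock(&shutdown_lock);
                return false;
        }

        printf("updating to %d hw queues\n", online_queues - 1);
        pthread_mutex_unlock(&shutdown_lock);
        return true;
}

int main(void)
{
        if (!update_nr_queues())
                printf("lost the race, unwinding the reset\n");
        return 0;
}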
drivers/nvme/host/tcp.c

@@ -2644,10 +2644,11 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)

         len = nvmf_get_address(ctrl, buf, size);

-        if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
-                return len;
         mutex_lock(&queue->queue_lock);
+        if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+                goto done;
+
         ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
         if (ret > 0) {
                 if (len > 0)
@@ -2655,7 +2656,7 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
                 len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
                                 (len) ? "," : "", &src_addr);
         }
-
+done:
         mutex_unlock(&queue->queue_lock);

         return len;
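The tcp change is a time-of-check/time-of-use repair: testing NVME_TCP_Q_LIVE before taking queue_lock left a window in which the queue could be destroyed between the test and the socket access, so the test now happens under the lock. A small userspace illustration of the corrected ordering (a printf stands in for kernel_getsockname(); this is not the driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool queue_live = true;  /* cleared by the destroy path under queue_lock */

static void get_address(void)
{
        pthread_mutex_lock(&queue_lock);
        if (!queue_live)
                goto done;              /* queue already torn down, nothing to report */
        printf("src_addr=...\n");       /* the socket query happens under the lock */
done:
        pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
        get_address();
        return 0;
}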
drivers/nvme/target/loop.c

@@ -265,6 +265,13 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
         if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
                 return;
+
+        /*
+         * It's possible that some requests might have been added
+         * after admin queue is stopped/quiesced. So now start the
+         * queue to flush these requests to the completion.
+         */
+        nvme_unquiesce_admin_queue(&ctrl->ctrl);
         nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
         nvme_remove_admin_tag_set(&ctrl->ctrl);
 }
@@ -297,6 +304,12 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
                 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
         }
         ctrl->ctrl.queue_count = 1;
+        /*
+         * It's possible that some requests might have been added
+         * after io queue is stopped/quiesced. So now start the
+         * queue to flush these requests to the completion.
+         */
+        nvme_unquiesce_io_queues(&ctrl->ctrl);
 }

 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
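Both loop-target hunks apply the same rule: a quiesced queue may still hold requests that arrived after it was stopped, so it has to be started once more and drained before its submission queue is destroyed. An illustrative userspace sketch of that drain-before-teardown ordering (a trivial array stands in for the block-layer queue; no kernel APIs are used):

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_DEPTH 8

struct fake_queue {
        int pending[QUEUE_DEPTH];
        int count;
        bool quiesced;
};

static void unquiesce_and_drain(struct fake_queue *q)
{
        q->quiesced = false;            /* "start the queue" */
        while (q->count > 0)            /* flush requests to completion */
                printf("completing request %d\n", q->pending[--q->count]);
}

int main(void)
{
        struct fake_queue q = { .quiesced = true };

        /* A request sneaks in after the queue was quiesced. */
        q.pending[q.count++] = 42;

        unquiesce_and_drain(&q);        /* must run before tearing the queue down */
        printf("queue destroyed\n");
        return 0;
}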
drivers/nvme/target/passthru.c

@@ -535,10 +535,6 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
                 break;
         case nvme_admin_identify:
                 switch (req->cmd->identify.cns) {
-                case NVME_ID_CNS_CTRL:
-                        req->execute = nvmet_passthru_execute_cmd;
-                        req->p.use_workqueue = true;
-                        return NVME_SC_SUCCESS;
                 case NVME_ID_CNS_CS_CTRL:
                         switch (req->cmd->identify.csi) {
                         case NVME_CSI_ZNS:
@@ -547,7 +543,9 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
                                 return NVME_SC_SUCCESS;
                         }
                         return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
+                case NVME_ID_CNS_CTRL:
                 case NVME_ID_CNS_NS:
+                case NVME_ID_CNS_NS_DESC_LIST:
                         req->execute = nvmet_passthru_execute_cmd;
                         req->p.use_workqueue = true;
                         return NVME_SC_SUCCESS;
drivers/nvme/target/rdma.c

@@ -39,6 +39,8 @@
 #define NVMET_RDMA_BACKLOG      128

+#define NVMET_RDMA_DISCRETE_RSP_TAG     -1
+
 struct nvmet_rdma_srq;

 struct nvmet_rdma_cmd {
@@ -75,7 +77,7 @@ struct nvmet_rdma_rsp {
         u32                     invalidate_rkey;

         struct list_head        wait_list;
-        struct list_head        free_list;
+        int                     tag;
 };

 enum nvmet_rdma_queue_state {
@@ -98,8 +100,7 @@ struct nvmet_rdma_queue {
         struct nvmet_sq         nvme_sq;
         struct nvmet_rdma_rsp   *rsps;
-        struct list_head        free_rsps;
-        spinlock_t              rsps_lock;
+        struct sbitmap          rsp_tags;
         struct nvmet_rdma_cmd   *cmds;

         struct work_struct      release_work;
@@ -172,7 +173,8 @@ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
 static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
                                 struct nvmet_rdma_rsp *r);
 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
-                                struct nvmet_rdma_rsp *r);
+                                struct nvmet_rdma_rsp *r,
+                                int tag);

 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
@@ -210,15 +212,12 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
 static inline struct nvmet_rdma_rsp *
 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 {
-        struct nvmet_rdma_rsp *rsp;
-        unsigned long flags;
+        struct nvmet_rdma_rsp *rsp = NULL;
+        int tag;

-        spin_lock_irqsave(&queue->rsps_lock, flags);
-        rsp = list_first_entry_or_null(&queue->free_rsps,
-                        struct nvmet_rdma_rsp, free_list);
-        if (likely(rsp))
-                list_del(&rsp->free_list);
-        spin_unlock_irqrestore(&queue->rsps_lock, flags);
+        tag = sbitmap_get(&queue->rsp_tags);
+        if (tag >= 0)
+                rsp = &queue->rsps[tag];

         if (unlikely(!rsp)) {
                 int ret;
@@ -226,13 +225,12 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
                 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
                 if (unlikely(!rsp))
                         return NULL;
-                ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+                ret = nvmet_rdma_alloc_rsp(queue->dev, rsp,
+                                NVMET_RDMA_DISCRETE_RSP_TAG);
                 if (unlikely(ret)) {
                         kfree(rsp);
                         return NULL;
                 }
-
-                rsp->allocated = true;
         }

         return rsp;
@@ -241,17 +239,13 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 static inline void
 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 {
-        unsigned long flags;
-
-        if (unlikely(rsp->allocated)) {
+        if (unlikely(rsp->tag == NVMET_RDMA_DISCRETE_RSP_TAG)) {
                 nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
                 kfree(rsp);
                 return;
         }

-        spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
-        list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
-        spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
+        sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag);
 }

 static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
@@ -404,7 +398,7 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
 }

 static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
-                struct nvmet_rdma_rsp *r)
+                struct nvmet_rdma_rsp *r, int tag)
 {
         /* NVMe CQE / RDMA SEND */
         r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
@@ -432,6 +426,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
         r->read_cqe.done = nvmet_rdma_read_data_done;
         /* Data Out / RDMA WRITE */
         r->write_cqe.done = nvmet_rdma_write_data_done;
+        r->tag = tag;

         return 0;
@@ -454,21 +449,23 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
 {
         struct nvmet_rdma_device *ndev = queue->dev;
         int nr_rsps = queue->recv_queue_size * 2;
-        int ret = -EINVAL, i;
+        int ret = -ENOMEM, i;
+
+        if (sbitmap_init_node(&queue->rsp_tags, nr_rsps, -1, GFP_KERNEL,
+                              NUMA_NO_NODE, false, true))
+                goto out;

         queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
                         GFP_KERNEL);
         if (!queue->rsps)
-                goto out;
+                goto out_free_sbitmap;

         for (i = 0; i < nr_rsps; i++) {
                 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

-                ret = nvmet_rdma_alloc_rsp(ndev, rsp);
+                ret = nvmet_rdma_alloc_rsp(ndev, rsp, i);
                 if (ret)
                         goto out_free;
-
-                list_add_tail(&rsp->free_list, &queue->free_rsps);
         }

         return 0;
@@ -477,6 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
         while (--i >= 0)
                 nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
         kfree(queue->rsps);
+out_free_sbitmap:
+        sbitmap_free(&queue->rsp_tags);
 out:
         return ret;
 }
@@ -489,6 +488,7 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
         for (i = 0; i < nr_rsps; i++)
                 nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
         kfree(queue->rsps);
+        sbitmap_free(&queue->rsp_tags);
 }

 static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
@@ -1447,8 +1447,6 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
         INIT_LIST_HEAD(&queue->rsp_wait_list);
         INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
         spin_lock_init(&queue->rsp_wr_wait_lock);
-        INIT_LIST_HEAD(&queue->free_rsps);
-        spin_lock_init(&queue->rsps_lock);
         INIT_LIST_HEAD(&queue->queue_list);

         queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
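The rdma-target rework swaps a spinlock-protected free list for an sbitmap: every preallocated rsp owns a fixed tag, allocation means claiming a clear bit, freeing means clearing it, and NVMET_RDMA_DISCRETE_RSP_TAG (-1) marks rsps that had to be allocated on the side when the pool was empty. A userspace sketch of that tag model using one atomic word (the kernel's sbitmap is sharded and scales much better; this only shows the idea):

#include <stdatomic.h>
#include <stdio.h>

#define NR_TAGS 32
static atomic_uint tag_map;     /* bit i set => tag i (and rsps[i]) is in use */

static int get_tag(void)
{
        for (int tag = 0; tag < NR_TAGS; tag++) {
                unsigned int bit = 1u << tag;

                /* Claim the bit only if it was previously clear. */
                if (!(atomic_fetch_or(&tag_map, bit) & bit))
                        return tag;
        }
        return -1;      /* pool exhausted: fall back to a discrete allocation */
}

static void put_tag(int tag)
{
        atomic_fetch_and(&tag_map, ~(1u << tag));
}

int main(void)
{
        int tag = get_tag();

        printf("claimed tag %d\n", tag);
        put_tag(tag);
        return 0;
}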