Commit 04b1ecb6 authored by Jens Axboe

Merge tag 'nvme-5.11-2021-01-07' of git://git.infradead.org/nvme into block-5.11

Pull NVMe updates from Christoph:

"nvme updates for 5.11:

 - fix a race in the nvme-tcp send code (Sagi Grimberg)
 - fix a list corruption in an nvme-rdma error path (Israel Rukshin)
 - avoid a possible double fetch in nvme-pci (Lalithambika Krishnakumar)
 - add the subsystem NQN quirk for a Samsung drive (Gopal Tiwari)
 - fix two compiler warnings in nvme-fcloop (James Smart)
 - don't call sleeping functions from irq context in nvme-fc (James Smart)
 - remove an unused argument (Max Gurtovoy)
 - remove unused exports (Minwoo Im)"

* tag 'nvme-5.11-2021-01-07' of git://git.infradead.org/nvme:
  nvme: remove the unused status argument from nvme_trace_bio_complete
  nvmet-rdma: Fix list_del corruption on queue establishment failure
  nvme: unexport functions with no external caller
  nvme: avoid possible double fetch in handling CQE
  nvme-tcp: Fix possible race of io_work and direct send
  nvme-pci: mark Samsung PM1725a as IGNORE_DEV_SUBNQN
  nvme-fcloop: Fix sscanf type and list_first_entry_or_null warnings
  nvme-fc: avoid calling _nvme_fc_abort_outstanding_ios from interrupt context
parents 04a6a536 2b59787a
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
 
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 {
 	int ret;
 
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 	return ret;
 }
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
 
 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
 {
@@ -331,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
 		req->__sector = nvme_lba_to_sect(req->q->queuedata,
 			le64_to_cpu(nvme_req(req)->result.u64));
 
-	nvme_trace_bio_complete(req, status);
+	nvme_trace_bio_complete(req);
 	blk_mq_end_request(req, status);
 }
 
@@ -578,7 +577,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
 {
 	struct request *req;
@@ -589,7 +588,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
 	nvme_init_request(req, cmd);
 	return req;
 }
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
 
 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
 {
...
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
 	struct blk_mq_tag_set	admin_tag_set;
 	struct blk_mq_tag_set	tag_set;
 
+	struct work_struct	ioerr_work;
 	struct delayed_work	connect_work;
 
 	struct kref		ref;
@@ -1888,6 +1889,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
 	}
 }
 
+static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+	struct nvme_fc_ctrl *ctrl =
+		container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+	nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
 static void
 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 {
@@ -2046,7 +2056,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 check_error:
 	if (terminate_assoc)
-		nvme_fc_error_recovery(ctrl, "transport detected io error");
+		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
 }
 
 static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
+	cancel_work_sync(&ctrl->ioerr_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 	/*
 	 * kill the association on the link side. this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+	INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
 	spin_lock_init(&ctrl->lock);
 
 	/* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 fail_ctrl:
 	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+	cancel_work_sync(&ctrl->ioerr_work);
 	cancel_work_sync(&ctrl->ctrl.reset_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
...
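The fc.c hunks above are all one pattern: the io-completion path can run in interrupt context, while the error-recovery it triggers ends up in functions that may sleep, so the completion handler now only queues ioerr_work and the work handler performs the recovery; the work item is initialised when the controller is created and cancelled synchronously on every teardown path. Below is a minimal, self-contained module sketch of that defer-to-workqueue pattern. It is not part of this commit; the demo_* names are made up for illustration.

#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical controller-like object; mirrors the nvme-fc pattern:
 * irq-context callers only queue work, the work handler may sleep. */
struct demo_ctrl {
	struct work_struct ioerr_work;
};

static struct demo_ctrl demo;

static void demo_ioerr_work(struct work_struct *work)
{
	struct demo_ctrl *ctrl =
		container_of(work, struct demo_ctrl, ioerr_work);

	/* Process context here, so sleeping is allowed. */
	pr_info("demo: running error recovery for %p\n", ctrl);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.ioerr_work, demo_ioerr_work);
	/* In a real driver this would be queued from the irq-driven
	 * completion handler rather than from module init. */
	schedule_work(&demo.ioerr_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Teardown must wait for any queued instance to finish. */
	cancel_work_sync(&demo.ioerr_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");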
@@ -610,8 +610,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
-		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd);
@@ -630,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
 int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
@@ -675,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 		kblockd_schedule_work(&head->requeue_work);
 }
 
-static inline void nvme_trace_bio_complete(struct request *req,
-	blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
 	struct nvme_ns *ns = req->q->queuedata;
 
@@ -731,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 {
 }
-static inline void nvme_trace_bio_complete(struct request *req,
-	blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
 }
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
...
@@ -967,6 +967,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
 	struct nvme_completion *cqe = &nvmeq->cqes[idx];
+	__u16 command_id = READ_ONCE(cqe->command_id);
 	struct request *req;
 
 	/*
@@ -975,17 +976,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	 * aborts. We don't even bother to allocate a struct request
 	 * for them but rather special case them here.
 	 */
-	if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
 		nvme_complete_async_event(&nvmeq->dev->ctrl,
 				cqe->status, &cqe->result);
 		return;
 	}
 
-	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
 	if (unlikely(!req)) {
 		dev_warn(nvmeq->dev->ctrl.device,
 			"invalid id %d completed on queue %d\n",
-			cqe->command_id, le16_to_cpu(cqe->sq_id));
+			command_id, le16_to_cpu(cqe->sq_id));
 		return;
 	}
 
@@ -3196,7 +3197,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x144d, 0xa821),	/* Samsung PM1725 */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x144d, 0xa822),	/* Samsung PM1725a */
-		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LighNVM qemu device */
 		.driver_data = NVME_QUIRK_LIGHTNVM, },
 	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
...
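The command_id snapshot above is the usual defence against a double fetch from memory a device can rewrite: the value is loaded once with READ_ONCE(), and the same local copy is both validated and used, so a misbehaving controller cannot change cqe->command_id between the check and the use. A small runnable sketch of the single-fetch idea follows; it is illustrative only, and a volatile read stands in for the kernel's READ_ONCE().

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a completion entry in memory the device keeps writing. */
struct fake_cqe {
	volatile uint16_t command_id;
	volatile uint16_t status;
};

static void handle_cqe(struct fake_cqe *cqe)
{
	/* Snapshot the id once; every later check and use sees the same
	 * value even if cqe->command_id changes underneath us. */
	uint16_t command_id = cqe->command_id;

	if (command_id >= 1024) {	/* validate the snapshot ... */
		printf("invalid id %u\n", command_id);
		return;
	}
	printf("completing request %u\n", command_id);	/* ... then use it */
}

int main(void)
{
	struct fake_cqe cqe = { .command_id = 7, .status = 0 };

	handle_cqe(&cqe);
	return 0;
}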
@@ -262,6 +262,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
 	}
 }
 
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+	int ret;
+
+	/* drain the send queue as much as we can... */
+	do {
+		ret = nvme_tcp_try_send(queue);
+	} while (ret > 0);
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		bool sync, bool last)
 {
@@ -279,7 +289,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 	if (queue->io_cpu == smp_processor_id() &&
 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
 		queue->more_requests = !last;
-		nvme_tcp_try_send(queue);
+		nvme_tcp_send_all(queue);
 		queue->more_requests = false;
 		mutex_unlock(&queue->send_mutex);
 	} else if (last) {
...
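The tcp.c change addresses a race between io_work and the direct-send path: when queue_rq wins the mutex_trylock on send_mutex, it now drains the send list via nvme_tcp_send_all() instead of issuing a single try-send, so requests queued by other contexts are not left behind with no one scheduled to send them. A toy sketch of the drain-until-no-progress loop is below; try_send() and send_all() are made-up stand-ins, not the driver's functions.

#include <stdio.h>

/* Hypothetical send queue holding a count of pending requests;
 * stands in for the driver's per-queue request list. */
struct send_queue {
	int pending;
};

/* Sends at most one request; returns >0 when progress was made,
 * 0 when nothing is left (mirrors a try-send style contract). */
static int try_send(struct send_queue *q)
{
	if (q->pending == 0)
		return 0;
	q->pending--;
	printf("sent one request, %d left\n", q->pending);
	return 1;
}

/* Drain the queue: a single try_send() is not enough, because other
 * contexts may have queued more requests than the caller knows about. */
static void send_all(struct send_queue *q)
{
	int ret;

	do {
		ret = try_send(q);
	} while (ret > 0);
}

int main(void)
{
	struct send_queue q = { .pending = 3 };

	send_all(&q);
	return 0;
}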
@@ -1501,7 +1501,8 @@ static ssize_t
 fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
-	int opcode, starting, amount;
+	unsigned int opcode;
+	int starting, amount;
 
 	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
 		return -EBADRQC;
@@ -1588,8 +1589,8 @@ static int __init fcloop_init(void)
 
 static void __exit fcloop_exit(void)
 {
-	struct fcloop_lport *lport;
-	struct fcloop_nport *nport;
+	struct fcloop_lport *lport = NULL;
+	struct fcloop_nport *nport = NULL;
 	struct fcloop_tport *tport;
 	struct fcloop_rport *rport;
 	unsigned long flags;
...
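The first fcloop hunk is a plain format fix: the "%x" conversion in sscanf() writes through an unsigned int pointer, so passing an int * is undefined behaviour and trips the compiler's format warning. A standalone illustration (the input string here is hypothetical, not from the driver):

#include <stdio.h>

int main(void)
{
	unsigned int opcode;	/* %x requires unsigned int * */
	int starting, amount;

	/* Parse "opcode:starting:amount" the same way the sysfs handler does. */
	if (sscanf("8:2:5", "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return 1;
	printf("opcode=0x%x starting=%d amount=%d\n", opcode, starting, amount);
	return 0;
}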
@@ -1641,6 +1641,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 	spin_lock_irqsave(&queue->state_lock, flags);
 	switch (queue->state) {
 	case NVMET_RDMA_Q_CONNECTING:
+		while (!list_empty(&queue->rsp_wait_list)) {
+			struct nvmet_rdma_rsp *rsp;
+
+			rsp = list_first_entry(&queue->rsp_wait_list,
+					       struct nvmet_rdma_rsp,
+					       wait_list);
+			list_del(&rsp->wait_list);
+			nvmet_rdma_put_rsp(rsp);
+		}
+		fallthrough;
 	case NVMET_RDMA_Q_LIVE:
 		queue->state = NVMET_RDMA_Q_DISCONNECTING;
 		disconnect = true;
...
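The nvmet-rdma fix drains rsp_wait_list entry by entry (take the first element, unlink it, drop the reference) before the CONNECTING case falls through to the disconnect handling, so responses parked there during a failed queue establishment are released cleanly instead of triggering the reported list_del corruption. For illustration only, here is the same pop-and-release loop in a self-contained form, using a hand-rolled singly linked list rather than the kernel's list helpers; all names are made up.

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for a response object parked on a wait list. */
struct rsp {
	int id;
	struct rsp *next;
};

/* Pop-and-release until the list is empty: each iteration takes the
 * first entry, unlinks it, then drops the reference (here: free()). */
static void drain_wait_list(struct rsp **head)
{
	while (*head) {
		struct rsp *rsp = *head;

		*head = rsp->next;	/* unlink the first entry */
		printf("releasing rsp %d\n", rsp->id);
		free(rsp);		/* put the reference */
	}
}

int main(void)
{
	struct rsp *head = NULL;

	/* Build a small wait list: 3 -> 2 -> 1. */
	for (int i = 1; i <= 3; i++) {
		struct rsp *rsp = malloc(sizeof(*rsp));

		rsp->id = i;
		rsp->next = head;
		head = rsp;
	}
	drain_wait_list(&head);
	return 0;
}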