Commit 6143393c authored by Jens Axboe

Merge branch 'nvme-5.2' of git://git.infradead.org/nvme into for-5.2/block

Pull NVMe updates from Christoph.

* 'nvme-5.2' of git://git.infradead.org/nvme:
  nvmet: protect discovery change log event list iteration
  nvme: mark nvme_core_init and nvme_core_exit static
  nvme: move command size checks to the core
  nvme-fabrics: check more command sizes
  nvme-pci: check more command sizes
  nvme-pci: remove an unneeded variable initialization
  nvme-pci: unquiesce admin queue on shutdown
  nvme-pci: shutdown on timeout during deletion
  nvme-pci: fix psdt field for single segment sgls
  nvme-multipath: don't print ANA group state by default
  nvme-multipath: split bios with the ns_head bio_set before submitting
  nvme-tcp: fix possible null deref on a timed out io queue connect
parents 273938bf 6f53e73b
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3879,10 +3879,37 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
 
-int __init nvme_core_init(void)
+/*
+ * Check we didn't inadvertently grow the command structure sizes:
+ */
+static inline void _nvme_check_size(void)
+{
+        BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
+        BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
+        BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
+        BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
+}
+
+static int __init nvme_core_init(void)
 {
         int result = -ENOMEM;
 
+        _nvme_check_size();
+
         nvme_wq = alloc_workqueue("nvme-wq",
                         WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
         if (!nvme_wq)
@@ -3929,7 +3956,7 @@ int __init nvme_core_init(void)
         return result;
 }
 
-void __exit nvme_core_exit(void)
+static void __exit nvme_core_exit(void)
 {
         ida_destroy(&nvme_subsystems_ida);
         class_destroy(nvme_subsys_class);

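Every command-struct check above compares against 64 because NVMe submission queue entries are exactly 64 bytes on the wire. BUILD_BUG_ON() costs nothing at run time: a false size assumption becomes a compile error, so centralizing the checks in nvme_core_init() means a struct that drifts breaks the core module's build rather than surfacing only in one transport driver. A standalone sketch of the technique (the struct and the simplified macro here are illustrative, not the kernel's own definitions):

#include <stdio.h>

/* Simplified stand-in for the kernel macro: the array gets a negative
 * size, and therefore fails to compile, whenever 'condition' is true. */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

/* Hypothetical 64-byte command layout (64 bytes on common 64-bit ABIs). */
struct demo_rw_command {
        unsigned char opcode, flags;
        unsigned short command_id;
        unsigned int nsid;
        unsigned long long rsvd2, metadata, prp1, prp2;
        unsigned long long slba;
        unsigned short length, control;
        unsigned int dsmgmt, reftag;
        unsigned short apptag, appmask;
};

int main(void)
{
        /* Refuses to compile if the struct ever deviates from the
         * 64-byte submission queue entry size. */
        BUILD_BUG_ON(sizeof(struct demo_rw_command) != 64);
        printf("command struct is %zu bytes\n",
               sizeof(struct demo_rw_command));
        return 0;
}
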
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -1188,6 +1188,7 @@ static void __exit nvmf_exit(void)
         class_destroy(nvmf_class);
         nvmf_host_put(nvmf_default_host);
 
+        BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
         BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
         BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
         BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);

--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -232,6 +232,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
         blk_qc_t ret = BLK_QC_T_NONE;
         int srcu_idx;
 
+        /*
+         * The namespace might be going away and the bio might
+         * be moved to a different queue via blk_steal_bios(),
+         * so we need to use the bio_split pool from the original
+         * queue to allocate the bvecs from.
+         */
+        blk_queue_split(q, &bio);
+
         srcu_idx = srcu_read_lock(&head->srcu);
         ns = nvme_find_path(head);
         if (likely(ns)) {
@@ -421,7 +429,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
         unsigned *nr_change_groups = data;
         struct nvme_ns *ns;
 
-        dev_info(ctrl->device, "ANA group %d: %s.\n",
+        dev_dbg(ctrl->device, "ANA group %d: %s.\n",
                 le32_to_cpu(desc->grpid),
                 nvme_ana_state_names[desc->state]);

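The ordering in the first hunk is the point of the fix: the bio is split against the ns_head queue's limits before any path is chosen, so the split bvecs come from the shared queue's bio_split pool rather than from a per-path queue that may disappear. An illustrative-only reduction of that shape (example_head_make_request is not the driver's code; blk_queue_split() does have this two-argument form in this kernel series):

#include <linux/blkdev.h>

/* Illustrative sketch: split first, route second. */
static blk_qc_t example_head_make_request(struct request_queue *q,
                                          struct bio *bio)
{
        /*
         * If the bio is later moved to another queue (e.g. stolen via
         * blk_steal_bios() while the namespace goes away), its bvecs
         * must not belong to a pool that vanishes with that path.
         */
        blk_queue_split(q, &bio);

        /* ... only now look up a live path and resubmit 'bio' on it ... */
        return BLK_QC_T_NONE;
}
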
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -577,7 +577,4 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
         return dev_to_disk(dev)->private_data;
 }
 
-int __init nvme_core_init(void);
-void __exit nvme_core_exit(void);
-
 #endif /* _NVME_H */

--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -146,7 +146,7 @@ static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
 static int queue_count_set(const char *val, const struct kernel_param *kp)
 {
-        int n = 0, ret;
+        int n, ret;
 
         ret = kstrtoint(val, 10, &n);
         if (ret)
@@ -226,26 +226,6 @@ struct nvme_iod {
         struct scatterlist *sg;
 };
 
-/*
- * Check we didin't inadvertently grow the command struct
- */
-static inline void _nvme_check_size(void)
-{
-        BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
-        BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
-        BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
-        BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
-        BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
-}
-
 static unsigned int max_io_queues(void)
 {
         return num_possible_cpus() + write_queues + poll_queues;
@@ -830,6 +810,7 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
                 return BLK_STS_RESOURCE;
         iod->dma_len = bv->bv_len;
 
+        cmnd->flags = NVME_CMD_SGL_METABUF;
         cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
         cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
         cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;

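The one-line SGL fix matters because the PSDT field (bits 7:6 of the command's flags byte) tells the controller whether dptr holds PRP entries or an SGL descriptor. The single-segment fast path built an SGL but left PSDT at zero, so the controller parsed the descriptor as PRPs. A small standalone sketch of the flag encoding (only the flags byte is modeled; the rest of the command layout is omitted):

#include <stdint.h>
#include <stdio.h>

/* PSDT lives in bits 7:6 of the flags byte: 00b = PRPs, 01b = SGL for
 * the data transfer, which is the value of NVME_CMD_SGL_METABUF. */
#define NVME_CMD_SGL_METABUF (1 << 6)

int main(void)
{
        uint8_t flags = 0;

        /* Without this, psdt stays 0 and dptr is misread as PRPs. */
        flags |= NVME_CMD_SGL_METABUF;
        printf("flags=0x%02x psdt=%u\n", flags, (flags >> 6) & 0x3);
        return 0;
}
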
@@ -1276,6 +1257,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
         struct nvme_dev *dev = nvmeq->dev;
         struct request *abort_req;
         struct nvme_command cmd;
+        bool shutdown = false;
         u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
         /* If PCI error recovery process is happening, we cannot reset or
@@ -1312,12 +1294,14 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
          * shutdown, so we return BLK_EH_DONE.
          */
         switch (dev->ctrl.state) {
+        case NVME_CTRL_DELETING:
+                shutdown = true;
         case NVME_CTRL_CONNECTING:
         case NVME_CTRL_RESETTING:
                 dev_warn_ratelimited(dev->ctrl.device,
                          "I/O %d QID %d timeout, disable controller\n",
                          req->tag, nvmeq->qid);
-                nvme_dev_disable(dev, false);
+                nvme_dev_disable(dev, shutdown);
                 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
                 return BLK_EH_DONE;
         default:

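NVME_CTRL_DELETING deliberately falls through into the CONNECTING/RESETTING arm: the same disable path runs, but with shutdown=true, so a controller that times out while being deleted gets an orderly shutdown rather than a reset it will never recover from. A hypothetical reduction of the set-a-flag-then-fall-through pattern (all names here are invented for the example):

#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { RESETTING, CONNECTING, DELETING, LIVE };

static void handle_timeout(enum ctrl_state state)
{
        bool shutdown = false;

        switch (state) {
        case DELETING:
                shutdown = true;
                /* fall through: share the disable path below */
        case CONNECTING:
        case RESETTING:
                printf("disable controller, shutdown=%s\n",
                       shutdown ? "true" : "false");
                return;
        default:
                printf("escalate to reset\n");
                return;
        }
}

int main(void)
{
        handle_timeout(DELETING);       /* shutdown=true */
        handle_timeout(RESETTING);      /* shutdown=false */
        return 0;
}
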
@@ -2433,8 +2417,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
          * must flush all entered requests to their failed completion to avoid
          * deadlocking blk-mq hot-cpu notifier.
          */
-        if (shutdown)
+        if (shutdown) {
                 nvme_start_queues(&dev->ctrl);
+                if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
+                        blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+        }
         mutex_unlock(&dev->shutdown_lock);
 }
 
@@ -2974,6 +2961,9 @@ static struct pci_driver nvme_driver = {
 static int __init nvme_init(void)
 {
+        BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
+        BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
         BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
         return pci_register_driver(&nvme_driver);
 }
@@ -2982,7 +2972,6 @@ static void __exit nvme_exit(void)
 {
         pci_unregister_driver(&nvme_driver);
         flush_workqueue(nvme_wq);
-        _nvme_check_size();
 }
 
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");

--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1423,6 +1423,7 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
         if (!ret) {
                 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
         } else {
-                __nvme_tcp_stop_queue(&ctrl->queues[idx]);
+                if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
+                        __nvme_tcp_stop_queue(&ctrl->queues[idx]);
                 dev_err(nctrl->device,
                         "failed to connect queue: %d ret=%d\n", idx, ret);

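A connect that times out can race with teardown that has already released the queue's socket, so unconditionally stopping the queue on the error path dereferences a NULL pointer. The fix only tears down what is still marked allocated. A hypothetical userspace reduction of the guard (demo_* names are invented for the example):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_queue {
        bool allocated;
        int *sock;      /* stand-in for the TCP socket */
};

static void demo_stop_queue(struct demo_queue *q)
{
        /* Would crash if q->sock was never (or no longer) set up. */
        printf("closing socket %d\n", *q->sock);
}

static void demo_connect_failed(struct demo_queue *q)
{
        /* Mirrors the test_bit(NVME_TCP_Q_ALLOCATED) guard:
         * only stop a queue whose resources actually exist. */
        if (q->allocated)
                demo_stop_queue(q);
        fprintf(stderr, "failed to connect queue\n");
}

int main(void)
{
        struct demo_queue q = { .allocated = false, .sock = NULL };

        demo_connect_failed(&q);        /* safe: teardown is skipped */
        return 0;
}
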
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -30,14 +30,17 @@ void nvmet_port_disc_changed(struct nvmet_port *port,
 {
         struct nvmet_ctrl *ctrl;
 
+        lockdep_assert_held(&nvmet_config_sem);
         nvmet_genctr++;
 
+        mutex_lock(&nvmet_disc_subsys->lock);
         list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
                 if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
                         continue;
 
                 __nvmet_disc_changed(port, ctrl);
         }
+        mutex_unlock(&nvmet_disc_subsys->lock);
 }
 
 static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
@@ -46,12 +49,14 @@ static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
 {
         struct nvmet_ctrl *ctrl;
 
+        mutex_lock(&nvmet_disc_subsys->lock);
         list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
                 if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
                         continue;
 
                 __nvmet_disc_changed(port, ctrl);
         }
+        mutex_unlock(&nvmet_disc_subsys->lock);
 }
 
 void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,

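Discovery change-log events walk the discovery subsystem's controller list while controllers can be created and torn down concurrently; taking nvmet_disc_subsys->lock around each walk keeps a controller from being freed mid-iteration. A userspace sketch of the same discipline (the list and all names are invented for the example):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the discovery controller list: iteration
 * and removal must run under the same lock, or a concurrently removed
 * entry could be freed while a walker still points at it. */
struct ctrl {
        int id;
        struct ctrl *next;
};

static struct ctrl *ctrl_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void disc_changed(void)
{
        pthread_mutex_lock(&list_lock);
        for (struct ctrl *c = ctrl_list; c; c = c->next)
                printf("queue AEN for ctrl %d\n", c->id);
        pthread_mutex_unlock(&list_lock);
}

static void ctrl_remove(struct ctrl *victim)
{
        pthread_mutex_lock(&list_lock);
        for (struct ctrl **p = &ctrl_list; *p; p = &(*p)->next) {
                if (*p == victim) {
                        *p = victim->next;
                        break;
                }
        }
        pthread_mutex_unlock(&list_lock);
        free(victim);   /* safe: no walker can still hold a reference */
}

int main(void)
{
        struct ctrl *c = malloc(sizeof(*c));

        c->id = 1;
        c->next = NULL;
        ctrl_list = c;
        disc_changed();
        ctrl_remove(c);
        return 0;
}
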