Commit 550203e6 authored by Jens Axboe

Merge branch 'nvme-4.16' of git://git.infradead.org/nvme into for-4.16/block

Pull NVMe fixes from Christoph:

"Below are the pending nvme updates for Linux 4.16. Just fixes and
 cleanups from various contributors this time around."
parents fb350e0a b837b283

drivers/nvme/host/core.c

@@ -232,6 +232,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 	old_state = ctrl->state;
 	switch (new_state) {
+	case NVME_CTRL_ADMIN_ONLY:
+		switch (old_state) {
+		case NVME_CTRL_RESETTING:
+			changed = true;
+			/* FALLTHRU */
+		default:
+			break;
+		}
+		break;
 	case NVME_CTRL_LIVE:
 		switch (old_state) {
 		case NVME_CTRL_NEW:
@@ -247,6 +256,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		switch (old_state) {
 		case NVME_CTRL_NEW:
 		case NVME_CTRL_LIVE:
+		case NVME_CTRL_ADMIN_ONLY:
 			changed = true;
 			/* FALLTHRU */
 		default:
@@ -266,6 +276,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 	case NVME_CTRL_DELETING:
 		switch (old_state) {
 		case NVME_CTRL_LIVE:
+		case NVME_CTRL_ADMIN_ONLY:
 		case NVME_CTRL_RESETTING:
 		case NVME_CTRL_RECONNECTING:
 			changed = true;
@@ -1217,16 +1228,27 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
 #ifdef CONFIG_NVME_MULTIPATH
 	/* should never be called due to GENHD_FL_HIDDEN */
 	if (WARN_ON_ONCE(ns->head->disk))
-		return -ENXIO;
+		goto fail;
 #endif
 	if (!kref_get_unless_zero(&ns->kref))
-		return -ENXIO;
+		goto fail;
+	if (!try_module_get(ns->ctrl->ops->module))
+		goto fail_put_ns;
+
 	return 0;
+
+fail_put_ns:
+	nvme_put_ns(ns);
+fail:
+	return -ENXIO;
 }
 
 static void nvme_release(struct gendisk *disk, fmode_t mode)
 {
-	nvme_put_ns(disk->private_data);
+	struct nvme_ns *ns = disk->private_data;
+
+	module_put(ns->ctrl->ops->module);
+	nvme_put_ns(ns);
 }
 
 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -2047,6 +2069,22 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
 	NULL,
 };
 
+static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+{
+	int count = 0;
+	struct nvme_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		if (ctrl->state != NVME_CTRL_DELETING &&
+		    ctrl->state != NVME_CTRL_DEAD)
+			count++;
+	}
+	mutex_unlock(&subsys->lock);
+
+	return count;
+}
+
 static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 {
 	struct nvme_subsystem *subsys, *found;
@@ -2085,7 +2123,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 		 * Verify that the subsystem actually supports multiple
 		 * controllers, else bail out.
 		 */
-		if (!(id->cmic & (1 << 1))) {
+		if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
 			dev_err(ctrl->device,
 				"ignoring ctrl due to duplicate subnqn (%s).\n",
 				found->subnqn);
@@ -2252,7 +2290,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 						 shutdown_timeout, 60);
 
 		if (ctrl->shutdown_timeout != shutdown_timeout)
-			dev_warn(ctrl->device,
+			dev_info(ctrl->device,
 				 "Shutdown timeout set to %u seconds\n",
 				 ctrl->shutdown_timeout);
 	} else
@@ -2336,8 +2374,14 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
 	struct nvme_ctrl *ctrl =
 		container_of(inode->i_cdev, struct nvme_ctrl, cdev);
 
-	if (ctrl->state != NVME_CTRL_LIVE)
+	switch (ctrl->state) {
+	case NVME_CTRL_LIVE:
+	case NVME_CTRL_ADMIN_ONLY:
+		break;
+	default:
 		return -EWOULDBLOCK;
+	}
+
 	file->private_data = ctrl;
 	return 0;
 }
@@ -2601,6 +2645,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
 	static const char *const state_name[] = {
 		[NVME_CTRL_NEW]		= "new",
 		[NVME_CTRL_LIVE]	= "live",
+		[NVME_CTRL_ADMIN_ONLY]	= "only-admin",
 		[NVME_CTRL_RESETTING]	= "resetting",
 		[NVME_CTRL_RECONNECTING]= "reconnecting",
 		[NVME_CTRL_DELETING]	= "deleting",
@@ -3073,6 +3118,8 @@ static void nvme_scan_work(struct work_struct *work)
 	if (ctrl->state != NVME_CTRL_LIVE)
 		return;
 
+	WARN_ON_ONCE(!ctrl->tagset);
+
 	if (nvme_identify_ctrl(ctrl, &id))
 		return;
@@ -3093,8 +3140,7 @@ static void nvme_scan_work(struct work_struct *work)
 void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
 	/*
-	 * Do not queue new scan work when a controller is reset during
-	 * removal.
+	 * Only new queue scan work when admin and IO queues are both alive
 	 */
 	if (ctrl->state == NVME_CTRL_LIVE)
 		queue_work(nvme_wq, &ctrl->scan_work);
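
Taken together, the nvme_change_ctrl_state() hunks above add the following edges around the new state. This summary is derived from the switch statements, not taken verbatim from the commit:

/*
 * NVME_CTRL_RESETTING  -> NVME_CTRL_ADMIN_ONLY  (IO queues unusable; the
 *                                                admin queue is kept alive)
 * NVME_CTRL_ADMIN_ONLY -> NVME_CTRL_RESETTING   (retry a full bring-up)
 * NVME_CTRL_ADMIN_ONLY -> NVME_CTRL_DELETING    (tear the controller down)
 */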

drivers/nvme/host/fabrics.c

@@ -492,7 +492,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
  */
 int nvmf_register_transport(struct nvmf_transport_ops *ops)
 {
-	if (!ops->create_ctrl)
+	if (!ops->create_ctrl || !ops->module)
 		return -EINVAL;
 
 	down_write(&nvmf_transports_rwsem);
@@ -868,32 +868,41 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 		goto out_unlock;
 	}
 
+	if (!try_module_get(ops->module)) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
 	ret = nvmf_check_required_opts(opts, ops->required_opts);
 	if (ret)
-		goto out_unlock;
+		goto out_module_put;
 	ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
 				ops->allowed_opts | ops->required_opts);
 	if (ret)
-		goto out_unlock;
+		goto out_module_put;
 
 	ctrl = ops->create_ctrl(dev, opts);
 	if (IS_ERR(ctrl)) {
 		ret = PTR_ERR(ctrl);
-		goto out_unlock;
+		goto out_module_put;
 	}
 
 	if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
 		dev_warn(ctrl->device,
			"controller returned incorrect NQN: \"%s\".\n",
			ctrl->subsys->subnqn);
+		module_put(ops->module);
 		up_read(&nvmf_transports_rwsem);
 		nvme_delete_ctrl_sync(ctrl);
 		return ERR_PTR(-EINVAL);
 	}
 
+	module_put(ops->module);
 	up_read(&nvmf_transports_rwsem);
 	return ctrl;
 
+out_module_put:
+	module_put(ops->module);
 out_unlock:
 	up_read(&nvmf_transports_rwsem);
 out_free_opts:

drivers/nvme/host/fabrics.h

@@ -108,6 +108,7 @@ struct nvmf_ctrl_options {
  *			fabric implementation of NVMe fabrics.
  * @entry:		Used by the fabrics library to add the new
  *			registration entry to its linked-list internal tree.
+ * @module:		Transport module reference
  * @name:		Name of the NVMe fabric driver implementation.
  * @required_opts:	sysfs command-line options that must be specified
  *			when adding a new NVMe controller.
@@ -126,6 +127,7 @@ struct nvmf_ctrl_options {
  */
 struct nvmf_transport_ops {
 	struct list_head	entry;
+	struct module		*module;
 	const char		*name;
 	int			required_opts;
 	int			allowed_opts;
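
The new @module field pairs with the stricter check in nvmf_register_transport() and the try_module_get()/module_put() calls in nvmf_create_ctrl() above: the transport module is pinned while create_ctrl() runs, so it cannot be unloaded mid-creation. A minimal sketch of what a transport now supplies at registration time; the "demo" transport and its stub create_ctrl are hypothetical, for illustration only:

#include <linux/err.h>
#include <linux/module.h>

#include "fabrics.h"	/* struct nvmf_transport_ops, nvmf_register_transport() */

static struct nvme_ctrl *demo_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	return ERR_PTR(-EOPNOTSUPP);	/* stub: creates no real controller */
}

static struct nvmf_transport_ops demo_transport = {
	.name		= "demo",		/* hypothetical transport name */
	.module		= THIS_MODULE,	/* registration fails if left unset */
	.create_ctrl	= demo_create_ctrl,
};

static int __init demo_init(void)
{
	/* returns -EINVAL if .create_ctrl or .module is missing */
	return nvmf_register_transport(&demo_transport);
}

static void __exit demo_exit(void)
{
	nvmf_unregister_transport(&demo_transport);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");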

drivers/nvme/host/fc.c

@@ -3381,6 +3381,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 static struct nvmf_transport_ops nvme_fc_transport = {
 	.name		= "fc",
+	.module		= THIS_MODULE,
 	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
 	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
 	.create_ctrl	= nvme_fc_create_ctrl,

drivers/nvme/host/nvme.h

@@ -119,6 +119,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
 enum nvme_ctrl_state {
 	NVME_CTRL_NEW,
 	NVME_CTRL_LIVE,
+	NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
 	NVME_CTRL_RESETTING,
 	NVME_CTRL_RECONNECTING,
 	NVME_CTRL_DELETING,

drivers/nvme/host/pci.c

@@ -1770,7 +1770,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
 	dma_addr_t descs_dma;
 	int i = 0;
 	void **bufs;
-	u64 size = 0, tmp;
+	u64 size, tmp;
 
 	tmp = (preferred + chunk_size - 1);
 	do_div(tmp, chunk_size);
@@ -1853,7 +1853,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
 	u64 min = (u64)dev->ctrl.hmmin * 4096;
 	u32 enable_bits = NVME_HOST_MEM_ENABLE;
-	int ret = 0;
+	int ret;
 
 	preferred = min(preferred, max);
 	if (min > max) {
@@ -2035,13 +2035,12 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 }
 
 /*
- * Return: error value if an error occurred setting up the queues or calling
- * Identify Device. 0 if these succeeded, even if adding some of the
- * namespaces failed. At the moment, these failures are silent. TBD which
- * failures should be reported.
+ * return error value only when tagset allocation failed
  */
 static int nvme_dev_add(struct nvme_dev *dev)
 {
+	int ret;
+
 	if (!dev->ctrl.tagset) {
 		dev->tagset.ops = &nvme_mq_ops;
 		dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2057,8 +2056,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
 		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
 		dev->tagset.driver_data = dev;
 
-		if (blk_mq_alloc_tag_set(&dev->tagset))
-			return 0;
+		ret = blk_mq_alloc_tag_set(&dev->tagset);
+		if (ret) {
+			dev_warn(dev->ctrl.device,
+				"IO queues tagset allocation failed %d\n", ret);
+			return ret;
+		}
 		dev->ctrl.tagset = &dev->tagset;
 		nvme_dbbuf_set(dev);
@@ -2291,6 +2294,7 @@ static void nvme_reset_work(struct work_struct *work)
 		container_of(work, struct nvme_dev, ctrl.reset_work);
 	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
 	int result = -ENODEV;
+	enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
 
 	if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
 		goto out;
@@ -2354,15 +2358,23 @@ static void nvme_reset_work(struct work_struct *work)
 		dev_warn(dev->ctrl.device, "IO queues not created\n");
 		nvme_kill_queues(&dev->ctrl);
 		nvme_remove_namespaces(&dev->ctrl);
+		new_state = NVME_CTRL_ADMIN_ONLY;
 	} else {
 		nvme_start_queues(&dev->ctrl);
 		nvme_wait_freeze(&dev->ctrl);
-		nvme_dev_add(dev);
+		/* hit this only when allocate tagset fails */
+		if (nvme_dev_add(dev))
+			new_state = NVME_CTRL_ADMIN_ONLY;
 		nvme_unfreeze(&dev->ctrl);
 	}
 
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
-		dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+	/*
+	 * If only admin queue live, keep it to do further investigation or
+	 * recovery.
+	 */
+	if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+		dev_warn(dev->ctrl.device,
+			"failed to mark controller state %d\n", new_state);
 		goto out;
 	}
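
The admin-only outcome above is visible from userspace: the controller's sysfs "state" attribute (backed by nvme_sysfs_show_state() in the drivers/nvme/host/core.c hunks) reports "only-admin" instead of "live". A hypothetical userspace check, not part of this diff:

/*
 * Returns 1 if the named controller (e.g. "nvme0") is admin-only,
 * 0 if it is in any other state, -1 on error.
 */
#include <stdio.h>
#include <string.h>

int nvme_ctrl_is_admin_only(const char *ctrl)
{
	char path[256], state[32] = "";
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/nvme/%s/state", ctrl);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fgets(state, sizeof(state), f))
		state[strcspn(state, "\n")] = '\0';	/* strip newline */
	fclose(f);
	return strcmp(state, "only-admin") == 0;
}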
@@ -2498,10 +2510,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto release_pools;
 
-	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
-	queue_work(nvme_wq, &dev->ctrl.reset_work);
+	nvme_reset_ctrl(&dev->ctrl);
+
 	return 0;
 
  release_pools:

drivers/nvme/host/rdma.c

@@ -2006,6 +2006,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 static struct nvmf_transport_ops nvme_rdma_transport = {
 	.name		= "rdma",
+	.module		= THIS_MODULE,
 	.required_opts	= NVMF_OPT_TRADDR,
 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,

drivers/nvme/target/core.c

@@ -830,7 +830,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	/* Don't accept keep-alive timeout for discovery controllers */
 	if (kato) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
-		goto out_free_sqs;
+		goto out_remove_ida;
 	}
 
 	/*
@@ -860,6 +860,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	*ctrlp = ctrl;
 	return 0;
 
+out_remove_ida:
+	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
 out_free_sqs:
 	kfree(ctrl->sqs);
 out_free_cqs:
@@ -877,21 +879,22 @@ static void nvmet_ctrl_free(struct kref *ref)
 	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
 	struct nvmet_subsys *subsys = ctrl->subsys;
 
+	nvmet_stop_keep_alive_timer(ctrl);
+
 	mutex_lock(&subsys->lock);
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
-	nvmet_stop_keep_alive_timer(ctrl);
-
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fatal_err_work);
 
 	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
-	nvmet_subsys_put(subsys);
 
 	kfree(ctrl->sqs);
 	kfree(ctrl->cqs);
 	kfree(ctrl);
+
+	nvmet_subsys_put(subsys);
 }
 
 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)

drivers/nvme/target/fabrics-cmd.c

@@ -225,7 +225,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 		goto out_ctrl_put;
 	}
 
-	pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
 
 out:
 	kfree(d);

drivers/nvme/target/fc.c

@@ -2490,14 +2490,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
 	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
 		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
 		    (tgtport->fc_target_port.port_name == traddr.pn)) {
-			/* a FC port can only be 1 nvmet port id */
-			if (!tgtport->port) {
-				tgtport->port = port;
-				port->priv = tgtport;
-				nvmet_fc_tgtport_get(tgtport);
-				ret = 0;
-			} else
-				ret = -EALREADY;
+			tgtport->port = port;
+			ret = 0;
 			break;
 		}
 	}
@@ -2508,19 +2502,7 @@ nvmet_fc_add_port(struct nvmet_port *port)
 static void
 nvmet_fc_remove_port(struct nvmet_port *port)
 {
-	struct nvmet_fc_tgtport *tgtport = port->priv;
-	unsigned long flags;
-	bool matched = false;
-
-	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
-	if (tgtport->port == port) {
-		matched = true;
-		tgtport->port = NULL;
-	}
-	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
-
-	if (matched)
-		nvmet_fc_tgtport_put(tgtport);
+	/* nothing to do */
 }
 
 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {

[diff for one file is collapsed in the original view]

drivers/nvme/target/loop.c

@@ -686,6 +686,7 @@ static struct nvmet_fabrics_ops nvme_loop_ops = {
 static struct nvmf_transport_ops nvme_loop_transport = {
 	.name		= "loop",
+	.module		= THIS_MODULE,
 	.create_ctrl	= nvme_loop_create_ctrl,
 };

drivers/nvme/target/rdma.c

@@ -921,7 +921,7 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
 {
-	pr_info("freeing queue %d\n", queue->idx);
+	pr_debug("freeing queue %d\n", queue->idx);
 
 	nvmet_sq_destroy(&queue->nvme_sq);
 
@@ -1503,25 +1503,9 @@ static int __init nvmet_rdma_init(void)
 static void __exit nvmet_rdma_exit(void)
 {
-	struct nvmet_rdma_queue *queue;
-
 	nvmet_unregister_transport(&nvmet_rdma_ops);
-
-	flush_scheduled_work();
-
-	mutex_lock(&nvmet_rdma_queue_mutex);
-	while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
-			struct nvmet_rdma_queue, queue_list))) {
-		list_del_init(&queue->queue_list);
-		mutex_unlock(&nvmet_rdma_queue_mutex);
-		__nvmet_rdma_queue_disconnect(queue);
-		mutex_lock(&nvmet_rdma_queue_mutex);
-	}
-	mutex_unlock(&nvmet_rdma_queue_mutex);
-
-	flush_scheduled_work();
 	ib_unregister_client(&nvmet_rdma_ib_client);
+	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
 	ida_destroy(&nvmet_rdma_queue_ida);
 }