Commit d65cfe90 authored by Jens Axboe

Merge branch 'nvmf-4.10' of git://git.infradead.org/nvme-fabrics into for-4.10/block

Sagi writes:

The major addition here is the nvme FC transport implementation
from James.

What else:
- some cleanups and memory-leak fixes in the host-side fabrics code from Bart
- a possible RCU violation fix from Sasha
- logging change from Max
- small include cleanup
parents 6e85eaf3 475d0fe7

MAINTAINERS
@@ -8659,6 +8659,16 @@ L: linux-nvme@lists.infradead.org
 S: Supported
 F: drivers/nvme/target/
 
+NVM EXPRESS FC TRANSPORT DRIVERS
+M: James Smart <james.smart@broadcom.com>
+L: linux-nvme@lists.infradead.org
+S: Supported
+F: include/linux/nvme-fc.h
+F: include/linux/nvme-fc-driver.h
+F: drivers/nvme/host/fc.c
+F: drivers/nvme/target/fc.c
+F: drivers/nvme/target/fcloop.c
+
 NVMEM FRAMEWORK
 M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 M: Maxime Ripard <maxime.ripard@free-electrons.com>

drivers/nvme/host/Kconfig
@@ -43,3 +43,20 @@ config NVME_RDMA
           from https://github.com/linux-nvme/nvme-cli.
 
           If unsure, say N.
+
+config NVME_FC
+        tristate "NVM Express over Fabrics FC host driver"
+        depends on BLOCK
+        depends on HAS_DMA
+        select NVME_CORE
+        select NVME_FABRICS
+        select SG_POOL
+        help
+          This provides support for the NVMe over Fabrics protocol using
+          the FC transport. This allows you to use remote block devices
+          exported using the NVMe protocol set.
+
+          To configure a NVMe over Fabrics controller use the nvme-cli tool
+          from https://github.com/linux-nvme/nvme-cli.
+
+          If unsure, say N.

drivers/nvme/host/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_NVME_CORE) += nvme-core.o
 obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
 obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
 obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
+obj-$(CONFIG_NVME_FC) += nvme-fc.o
 
 nvme-core-y := core.o
 nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
@@ -12,3 +13,5 @@ nvme-y += pci.o
 nvme-fabrics-y += fabrics.o
 
 nvme-rdma-y += rdma.o
+
+nvme-fc-y += fc.o

drivers/nvme/host/core.c
@@ -303,7 +303,6 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
         memset(cmnd, 0, sizeof(*cmnd));
         cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
-        cmnd->rw.command_id = req->tag;
         cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
         cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
         cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -345,6 +344,8 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
         else
                 nvme_setup_rw(ns, req, cmd);
 
+        cmd->common.command_id = req->tag;
+
         return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
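A note on the command_id hunks: this change, together with the matching removals in pci.c, rdma.c, and the target loop driver further down, moves the submission-queue command_id assignment out of each transport's queue_rq() path and into the shared nvme_setup_cmd(). A simplified sketch of the resulting helper follows; it is illustrative only, with the passthrough and discard branches omitted, and is not a verbatim copy of the kernel function.

/* Sketch: shared command setup after this series; the tag is stamped once here. */
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                   struct nvme_command *cmd)
{
        int ret = BLK_MQ_RQ_QUEUE_OK;

        if (req_op(req) == REQ_OP_FLUSH)
                nvme_setup_flush(ns, cmd);
        else
                nvme_setup_rw(ns, req, cmd);

        /* formerly duplicated in pci.c, rdma.c, and the loop driver */
        cmd->common.command_id = req->tag;
        return ret;
}

Each transport's queue_rq() then only needs to call nvme_setup_cmd() before blk_mq_start_request(), which is exactly what the per-driver removals below amount to.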

drivers/nvme/host/fabrics.c
@@ -666,10 +666,12 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                         if (nqnlen >= NVMF_NQN_SIZE) {
                                 pr_err("%s needs to be < %d bytes\n",
                                         p, NVMF_NQN_SIZE);
+                                kfree(p);
                                 ret = -EINVAL;
                                 goto out;
                         }
                         opts->host = nvmf_host_add(p);
+                        kfree(p);
                         if (!opts->host) {
                                 ret = -ENOMEM;
                                 goto out;
@@ -825,8 +827,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 out_unlock:
         mutex_unlock(&nvmf_transports_mutex);
 out_free_opts:
-        nvmf_host_put(opts->host);
-        kfree(opts);
+        nvmf_free_options(opts);
         return ERR_PTR(ret);
 }
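The first fabrics.c hunk is one of Bart's leak fixes: the NQN string duplicated while parsing the hostnqn option was never freed. nvmf_host_add() keeps its own copy of the NQN, so the caller's duplicate can be released on every path. A hedged sketch of the corrected case, with the surrounding switch statement and the other options elided:

case NVMF_OPT_HOSTNQN:
        p = match_strdup(args);          /* caller-owned duplicate */
        if (!p) {
                ret = -ENOMEM;
                goto out;
        }
        nqnlen = strlen(p);
        if (nqnlen >= NVMF_NQN_SIZE) {
                pr_err("%s needs to be < %d bytes\n", p, NVMF_NQN_SIZE);
                kfree(p);                /* was leaked on this error path */
                ret = -EINVAL;
                goto out;
        }
        opts->host = nvmf_host_add(p);
        kfree(p);                        /* nvmf_host_add() copied the NQN */
        if (!opts->host) {
                ret = -ENOMEM;
                goto out;
        }
        break;

The second hunk closes the matching teardown path: nvmf_free_options() releases the host reference and the option strings along with the structure itself, replacing the open-coded nvmf_host_put() plus kfree().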
This diff is collapsed.

drivers/nvme/host/pci.c
@@ -611,7 +611,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
         if (ret != BLK_MQ_RQ_QUEUE_OK)
                 goto out;
 
-        cmnd.common.command_id = req->tag;
         blk_mq_start_request(req);
 
         spin_lock_irq(&nvmeq->q_lock);

drivers/nvme/host/rdma.c
@@ -28,7 +28,6 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
-#include <rdma/ib_cm.h>
 #include <linux/nvme-rdma.h>
 
 #include "nvme.h"
@@ -241,7 +240,9 @@ static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
 static void nvme_rdma_qp_event(struct ib_event *event, void *context)
 {
-        pr_debug("QP event %d\n", event->event);
+        pr_debug("QP event %s (%d)\n",
+                 ib_event_msg(event->event), event->event);
 }
 
 static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
@@ -1398,7 +1399,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
         if (ret != BLK_MQ_RQ_QUEUE_OK)
                 return ret;
 
-        c->common.command_id = rq->tag;
         blk_mq_start_request(rq);
 
         map_len = nvme_map_len(rq);
@@ -1904,6 +1904,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                 opts->queue_size = ctrl->ctrl.maxcmd;
         }
 
+        if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+                /* warn if sqsize is lower than queue_size */
+                dev_warn(ctrl->ctrl.device,
+                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
+                        opts->queue_size, ctrl->ctrl.sqsize + 1);
+                opts->queue_size = ctrl->ctrl.sqsize + 1;
+        }
+
         if (opts->nr_io_queues) {
                 ret = nvme_rdma_create_io_queues(ctrl);
                 if (ret)

drivers/nvme/host/scsi.c
@@ -1280,10 +1280,6 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
 static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
         u16 idx, u16 bd_len, u8 llbaa)
 {
-        u16 bd_num;
-
-        bd_num = bd_len / ((llbaa == 0) ?
-                        SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
         /* Store block descriptor info if a FORMAT UNIT comes later */
         /* TODO Saving 1st BD info; what to do if multiple BD received? */
         if (llbaa == 0) {
@@ -1528,7 +1524,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
         int nvme_sc;
         struct nvme_id_ns *id_ns;
         u8 i;
-        u8 flbas, nlbaf;
+        u8 nlbaf;
         u8 selected_lbaf = 0xFF;
         u32 cdw10 = 0;
         struct nvme_command c;
@@ -1539,7 +1535,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
         if (res)
                 return res;
 
-        flbas = (id_ns->flbas) & 0x0F;
         nlbaf = id_ns->nlbaf;
 
         for (i = 0; i < nlbaf; i++) {
@@ -2168,12 +2163,10 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
 static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                                         u8 *cmd)
 {
-        u8 immed, pcmod, no_flush, start;
+        u8 immed, no_flush;
 
         immed = cmd[1] & 0x01;
-        pcmod = cmd[3] & 0x0f;
         no_flush = cmd[4] & 0x04;
-        start = cmd[4] & 0x01;
 
         if (immed != 0) {
                 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,

drivers/nvme/target/Kconfig
@@ -34,3 +34,27 @@ config NVME_TARGET_RDMA
           devices over RDMA.
 
           If unsure, say N.
+
+config NVME_TARGET_FC
+        tristate "NVMe over Fabrics FC target driver"
+        depends on NVME_TARGET
+        depends on HAS_DMA
+        help
+          This enables the NVMe FC target support, which allows exporting NVMe
+          devices over FC.
+
+          If unsure, say N.
+
+config NVME_TARGET_FCLOOP
+        tristate "NVMe over Fabrics FC Transport Loopback Test driver"
+        depends on NVME_TARGET
+        select NVME_CORE
+        select NVME_FABRICS
+        select SG_POOL
+        depends on NVME_FC
+        depends on NVME_TARGET_FC
+        help
+          This enables the NVMe FC loopback test support, which can be useful
+          to test NVMe-FC transport interfaces.
+
+          If unsure, say N.

drivers/nvme/target/Makefile
@@ -2,8 +2,12 @@
 obj-$(CONFIG_NVME_TARGET) += nvmet.o
 obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
 obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
+obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
+obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
 
 nvmet-y += core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \
                 discovery.o
 nvme-loop-y += loop.o
 nvmet-rdma-y += rdma.o
+nvmet-fc-y += fc.o
+nvme-fcloop-y += fcloop.o

drivers/nvme/target/configfs.c
@@ -37,6 +37,8 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
                 return sprintf(page, "ipv6\n");
         case NVMF_ADDR_FAMILY_IB:
                 return sprintf(page, "ib\n");
+        case NVMF_ADDR_FAMILY_FC:
+                return sprintf(page, "fc\n");
         default:
                 return sprintf(page, "\n");
         }
@@ -59,6 +61,8 @@ static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
                 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
         } else if (sysfs_streq(page, "ib")) {
                 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
+        } else if (sysfs_streq(page, "fc")) {
+                port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
         } else {
                 pr_err("Invalid value '%s' for adrfam\n", page);
                 return -EINVAL;
@@ -209,6 +213,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item,
                 return sprintf(page, "rdma\n");
         case NVMF_TRTYPE_LOOP:
                 return sprintf(page, "loop\n");
+        case NVMF_TRTYPE_FC:
+                return sprintf(page, "fc\n");
         default:
                 return sprintf(page, "\n");
         }
@@ -229,6 +235,12 @@ static void nvmet_port_init_tsas_loop(struct nvmet_port *port)
         memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
 }
 
+static void nvmet_port_init_tsas_fc(struct nvmet_port *port)
+{
+        port->disc_addr.trtype = NVMF_TRTYPE_FC;
+        memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
+}
+
 static ssize_t nvmet_addr_trtype_store(struct config_item *item,
                 const char *page, size_t count)
 {
@@ -244,6 +256,8 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item,
                 nvmet_port_init_tsas_rdma(port);
         } else if (sysfs_streq(page, "loop")) {
                 nvmet_port_init_tsas_loop(port);
+        } else if (sysfs_streq(page, "fc")) {
+                nvmet_port_init_tsas_fc(port);
         } else {
                 pr_err("Invalid value '%s' for trtype\n", page);
                 return -EINVAL;
@@ -271,7 +285,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
         mutex_lock(&subsys->lock);
         ret = -EBUSY;
-        if (nvmet_ns_enabled(ns))
+        if (ns->enabled)
                 goto out_unlock;
 
         kfree(ns->device_path);
@@ -307,7 +321,7 @@ static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
         int ret = 0;
 
         mutex_lock(&subsys->lock);
-        if (nvmet_ns_enabled(ns)) {
+        if (ns->enabled) {
                 ret = -EBUSY;
                 goto out_unlock;
         }
@@ -339,7 +353,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_nguid);
 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
 {
-        return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
+        return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
 }
 
 static ssize_t nvmet_ns_enable_store(struct config_item *item,

drivers/nvme/target/core.c
@@ -264,7 +264,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
         int ret = 0;
 
         mutex_lock(&subsys->lock);
-        if (!list_empty(&ns->dev_link))
+        if (ns->enabled)
                 goto out_unlock;
 
         ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
@@ -309,6 +309,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
 
+        ns->enabled = true;
         ret = 0;
 out_unlock:
         mutex_unlock(&subsys->lock);
@@ -325,11 +326,11 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
         struct nvmet_ctrl *ctrl;
 
         mutex_lock(&subsys->lock);
-        if (list_empty(&ns->dev_link)) {
-                mutex_unlock(&subsys->lock);
-                return;
-        }
-        list_del_init(&ns->dev_link);
+        if (!ns->enabled)
+                goto out_unlock;
+
+        ns->enabled = false;
+        list_del_rcu(&ns->dev_link);
         mutex_unlock(&subsys->lock);
 
         /*
@@ -351,6 +352,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
         if (ns->bdev)
                 blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
+out_unlock:
         mutex_unlock(&subsys->lock);
 }
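The nvmet core and configfs hunks replace the old trick of treating list membership (list_empty(&ns->dev_link)) as the namespace's enabled state with an explicit ns->enabled flag, and switch the unlink to list_del_rcu(); this is the possible RCU violation that Sasha's fix addresses, since other contexts walk the namespace list under RCU and list_del_init() re-initializes the entry out from under them. A generic, hedged illustration of the pattern follows; it is not the nvmet code itself, and all names in it are made up.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct item {
        struct list_head link;
        bool enabled;                   /* protected by items_lock */
};

static LIST_HEAD(items);
static DEFINE_MUTEX(items_lock);

/* Writer side: flip the flag and unlink under the mutex. */
static void item_disable(struct item *it)
{
        mutex_lock(&items_lock);
        if (!it->enabled) {
                mutex_unlock(&items_lock);
                return;
        }
        it->enabled = false;
        /* list_del_rcu() keeps ->next intact so RCU readers can keep walking */
        list_del_rcu(&it->link);
        mutex_unlock(&items_lock);

        /* wait for pre-existing readers before tearing the item down */
        synchronize_rcu();
}

Readers would traverse with list_for_each_entry_rcu() under rcu_read_lock(), which is also why "is this entry still on the list" stops being a reliable enabled test once RCU removal is in play.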
This diff is collapsed.
This diff is collapsed.

drivers/nvme/target/loop.c
@@ -194,7 +194,6 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                 BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
         }
 
-        iod->cmd.common.command_id = req->tag;
         blk_mq_start_request(req);
 
         schedule_work(&iod->work);

drivers/nvme/target/nvmet.h
@@ -47,6 +47,7 @@ struct nvmet_ns {
         loff_t size;
         u8 nguid[16];
 
+        bool enabled;
         struct nvmet_subsys *subsys;
         const char *device_path;
@@ -61,11 +62,6 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
         return container_of(to_config_group(item), struct nvmet_ns, group);
 }
 
-static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
-{
-        return !list_empty_careful(&ns->dev_link);
-}
-
 struct nvmet_cq {
         u16 qid;
         u16 size;

drivers/nvme/target/rdma.c
@@ -1044,8 +1044,10 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
         }
 
         ret = nvmet_sq_init(&queue->nvme_sq);
-        if (ret)
+        if (ret) {
+                ret = NVME_RDMA_CM_NO_RSC;
                 goto out_free_queue;
+        }
 
         ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
         if (ret)
@@ -1114,6 +1116,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
 out_free_queue:
         kfree(queue);
 out_reject:
+        pr_debug("rejecting connect request with status code %d\n", ret);
         nvmet_rdma_cm_reject(cm_id, ret);
         return NULL;
 }
@@ -1127,7 +1130,8 @@ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
                 rdma_notify(queue->cm_id, event->event);
                 break;
         default:
-                pr_err("received unrecognized IB QP event %d\n", event->event);
+                pr_err("received IB QP event: %s (%d)\n",
+                       ib_event_msg(event->event), event->event);
                 break;
         }
 }
This diff is collapsed.
This diff is collapsed.

include/linux/nvme.h
@@ -963,6 +963,19 @@ enum {
         NVME_SC_ACCESS_DENIED = 0x286,
 
         NVME_SC_DNR = 0x4000,
+
+        /*
+         * FC Transport-specific error status values for NVME commands
+         *
+         * Transport-specific status code values must be in the range 0xB0..0xBF
+         */
+
+        /* Generic FC failure - catchall */
+        NVME_SC_FC_TRANSPORT_ERROR = 0x00B0,
+
+        /* I/O failure due to FC ABTS'd */
+        NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1,
 };
 
 struct nvme_completion {

include/linux/parser.h
@@ -27,6 +27,7 @@ typedef struct {
 int match_token(char *, const match_table_t table, substring_t args[]);
 int match_int(substring_t *, int *result);
+int match_u64(substring_t *, u64 *result);
 int match_octal(substring_t *, int *result);
 int match_hex(substring_t *, int *result);
 bool match_wildcard(const char *pattern, const char *str);
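match_u64() complements the existing match_int() for values that do not fit in an int; the FC transport deals in 64-bit world-wide node/port names, which is exactly that kind of value. A hedged usage sketch follows, with a made-up option name and token table; only the match_u64() declaration above is taken from the patch.

#include <linux/errno.h>
#include <linux/parser.h>
#include <linux/string.h>
#include <linux/types.h>

enum { OPT_WWNN, OPT_ERR };

static const match_table_t opt_tokens = {
        { OPT_WWNN,     "wwnn=%s" },
        { OPT_ERR,      NULL },
};

/* Parse "wwnn=<value>" out of a comma-separated option string. */
static int parse_wwnn(char *options, u64 *wwnn)
{
        substring_t args[MAX_OPT_ARGS];
        char *p;

        while ((p = strsep(&options, ",")) != NULL) {
                if (!*p)
                        continue;
                if (match_token(p, opt_tokens, args) == OPT_WWNN) {
                        /* match_u64() converts the captured substring */
                        if (match_u64(args, wwnn))
                                return -EINVAL;
                        return 0;
                }
        }
        return -EINVAL;
}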

include/uapi/scsi/fc/fc_fs.h
@@ -190,6 +190,7 @@ enum fc_fh_type {
         FC_TYPE_FCP = 0x08, /* SCSI FCP */
         FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */
         FC_TYPE_ILS = 0x22, /* internal link service */
+        FC_TYPE_NVME = 0x28, /* FC-NVME */
 };
 
 /*
@@ -203,6 +204,7 @@ enum fc_fh_type {
         [FC_TYPE_FCP] = "FCP", \
         [FC_TYPE_CT] = "CT", \
         [FC_TYPE_ILS] = "ILS", \
+        [FC_TYPE_NVME] = "NVME", \
 }
 
 /*
This diff is collapsed.