Commit 815a76b9 authored by Linus Torvalds

Merge tag 'block-6.8-2024-02-01' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - Remove duplicated enums (Guixen)
     - Use appropriate controller state accessors (Keith)
     - Retryable authentication (Hannes)
     - Add missing module descriptions (Chaitanya)
     - Fibre-channel fixes for blktests (Daniel)
     - Various type correctness updates (Caleb)
     - Improve fabrics connection debugging prints (Nitin)
     - Passthrough command verbose error logging (Adam)

 - Fix for where we set IO priority in the bio for drivers that use
   fops->submit_bio() to queue IO, like md/dm etc.
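
   A condensed sketch of that fix, pieced together from the block-layer hunks
   further down in this diff (not the verbatim patch; the accounting code in
   submit_bio() is trimmed): bio_set_ioprio() moves out of the blk-mq
   submission path and into submit_bio(), so bios that are queued through a
   stacking driver's fops->submit_bio() also get a default priority.

	static void bio_set_ioprio(struct bio *bio)
	{
		/* Nobody set ioprio so far? Initialize it based on task's nice value */
		if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
			bio->bi_ioprio = get_current_ioprio();
		blkcg_set_ioprio(bio);
	}

	void submit_bio(struct bio *bio)
	{
		/* ... accounting trimmed ... */
		bio_set_ioprio(bio);	/* now runs for every bio, md/dm included */
		submit_bio_noacct(bio);
	}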

* tag 'block-6.8-2024-02-01' of git://git.kernel.dk/linux: (32 commits)
  block: Fix where bio IO priority gets set
  nvme: allow passthru cmd error logging
  nvme-fc: show hostnqn when connecting to fc target
  nvme-rdma: show hostnqn when connecting to rdma target
  nvme-tcp: show hostnqn when connecting to tcp target
  nvmet-fc: use RCU list iterator for assoc_list
  nvmet-fc: take ref count on tgtport before delete assoc
  nvmet-fc: avoid deadlock on delete association path
  nvmet-fc: abort command when there is no binding
  nvmet-fc: do not tack refs on tgtports from assoc
  nvmet-fc: remove null hostport pointer check
  nvmet-fc: hold reference on hostport match
  nvmet-fc: free queue and assoc directly
  nvmet-fc: defer cleanup using RCU properly
  nvmet-fc: release reference on target port
  nvmet-fcloop: swap the list_add_tail arguments
  nvme-fc: do not wait in vain when unloading module
  nvme-fc: log human-readable opcode on timeout
  nvme: split out fabrics version of nvme_opcode_str()
  nvme: take const cmd pointer in read-only helpers
  ...
parents 717ca0b8 f3c89983
@@ -49,6 +49,7 @@
 #include "blk-pm.h"
 #include "blk-cgroup.h"
 #include "blk-throttle.h"
+#include "blk-ioprio.h"
 struct dentry *blk_debugfs_root;
@@ -833,6 +834,14 @@ void submit_bio_noacct(struct bio *bio)
 }
 EXPORT_SYMBOL(submit_bio_noacct);
+static void bio_set_ioprio(struct bio *bio)
+{
+	/* Nobody set ioprio so far? Initialize it based on task's nice value */
+	if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
+		bio->bi_ioprio = get_current_ioprio();
+	blkcg_set_ioprio(bio);
+}
 /**
  * submit_bio - submit a bio to the block device layer for I/O
  * @bio: The &struct bio which describes the I/O
@@ -855,6 +864,7 @@ void submit_bio(struct bio *bio)
 		count_vm_events(PGPGOUT, bio_sectors(bio));
 	}
+	bio_set_ioprio(bio);
 	submit_bio_noacct(bio);
 }
 EXPORT_SYMBOL(submit_bio);
......
@@ -40,7 +40,6 @@
 #include "blk-stat.h"
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
-#include "blk-ioprio.h"
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
@@ -2944,14 +2943,6 @@ static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
 	return true;
 }
-static void bio_set_ioprio(struct bio *bio)
-{
-	/* Nobody set ioprio so far? Initialize it based on task's nice value */
-	if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
-		bio->bi_ioprio = get_current_ioprio();
-	blkcg_set_ioprio(bio);
-}
 /**
  * blk_mq_submit_bio - Create and send a request to block device.
  * @bio: Bio pointer.
@@ -2976,7 +2967,6 @@ void blk_mq_submit_bio(struct bio *bio)
 	blk_status_t ret;
 	bio = blk_queue_bounce(bio, q);
-	bio_set_ioprio(bio);
 	if (plug) {
 		rq = rq_list_peek(&plug->cached_rq);
......
@@ -471,4 +471,5 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
 }
 EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+MODULE_DESCRIPTION("NVMe Authentication framework");
 MODULE_LICENSE("GPL v2");
@@ -181,5 +181,6 @@ static void __exit nvme_keyring_exit(void)
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+MODULE_DESCRIPTION("NVMe Keyring implementation");
 module_init(nvme_keyring_init);
 module_exit(nvme_keyring_exit);
@@ -797,6 +797,7 @@ static int apple_nvme_init_request(struct blk_mq_tag_set *set,
 static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
 {
+	enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
 	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
 	bool dead = false, freeze = false;
 	unsigned long flags;
@@ -808,8 +809,8 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
 	if (csts & NVME_CSTS_CFS)
 		dead = true;
-	if (anv->ctrl.state == NVME_CTRL_LIVE ||
-	    anv->ctrl.state == NVME_CTRL_RESETTING) {
+	if (state == NVME_CTRL_LIVE ||
+	    state == NVME_CTRL_RESETTING) {
 		freeze = true;
 		nvme_start_freeze(&anv->ctrl);
 	}
@@ -881,7 +882,7 @@ static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
 	unsigned long flags;
 	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
-	if (anv->ctrl.state != NVME_CTRL_LIVE) {
+	if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
 		/*
 		 * From rdma.c:
 		 * If we are resetting, connecting or deleting we should
@@ -985,10 +986,10 @@ static void apple_nvme_reset_work(struct work_struct *work)
 	u32 boot_status, aqa;
 	struct apple_nvme *anv =
 		container_of(work, struct apple_nvme, ctrl.reset_work);
+	enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
-	if (anv->ctrl.state != NVME_CTRL_RESETTING) {
-		dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
-			 anv->ctrl.state);
+	if (state != NVME_CTRL_RESETTING) {
+		dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
 		ret = -ENODEV;
 		goto out;
 	}
......
@@ -48,11 +48,6 @@ struct nvme_dhchap_queue_context {
 static struct workqueue_struct *nvme_auth_wq;
-#define nvme_auth_flags_from_qid(qid) \
-	(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
-#define nvme_auth_queue_from_qid(ctrl, qid) \
-	(qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
 static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
 {
 	return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
@@ -63,10 +58,15 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
 		void *data, size_t data_len, bool auth_send)
 {
 	struct nvme_command cmd = {};
-	blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
-	struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+	nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
+	struct request_queue *q = ctrl->fabrics_q;
 	int ret;
+	if (qid != 0) {
+		flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
+		q = ctrl->connect_q;
+	}
 	cmd.auth_common.opcode = nvme_fabrics_command;
 	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
 	cmd.auth_common.spsp0 = 0x01;
@@ -80,8 +80,7 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
 	}
 	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
-			qid == 0 ? NVME_QID_ANY : qid,
-			0, flags);
+			qid == 0 ? NVME_QID_ANY : qid, flags);
 	if (ret > 0)
 		dev_warn(ctrl->device,
 			"qid %d auth_send failed with status %d\n", qid, ret);
@@ -897,7 +896,7 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
 	 * If the ctrl is no connected, bail as reconnect will handle
 	 * authentication.
 	 */
-	if (ctrl->state != NVME_CTRL_LIVE)
+	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
 		return;
 	/* Authenticate admin queue first */
......
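
Aside (not part of the patch): the auth hunk above is one of several call sites converted to the reworked __nvme_submit_sync_cmd(). The separate at_head argument and raw blk_mq_req_flags_t are folded into a single nvme_submit_flags_t word, whose definition appears in the nvme.h hunk later in this diff. A condensed before/after, mirroring the nvmf_connect_admin_queue() call shown further down:

	/* before: at_head and blk-mq allocation flags passed separately */
	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
			data, sizeof(*data), NVME_QID_ANY, 1,
			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);

	/* after: one flag word; the helper derives the blk-mq flags itself */
	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
			data, sizeof(*data), NVME_QID_ANY,
			NVME_SUBMIT_AT_HEAD | NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED);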
@@ -171,15 +171,15 @@ static const char * const nvme_statuses[] = {
 	[NVME_SC_HOST_ABORTED_CMD] = "Host Aborted Command",
 };
-const unsigned char *nvme_get_error_status_str(u16 status)
+const char *nvme_get_error_status_str(u16 status)
 {
 	status &= 0x7ff;
 	if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status])
-		return nvme_statuses[status & 0x7ff];
+		return nvme_statuses[status];
 	return "Unknown";
 }
-const unsigned char *nvme_get_opcode_str(u8 opcode)
+const char *nvme_get_opcode_str(u8 opcode)
 {
 	if (opcode < ARRAY_SIZE(nvme_ops) && nvme_ops[opcode])
 		return nvme_ops[opcode];
@@ -187,7 +187,7 @@ const unsigned char *nvme_get_opcode_str(u8 opcode)
 }
 EXPORT_SYMBOL_GPL(nvme_get_opcode_str);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+const char *nvme_get_admin_opcode_str(u8 opcode)
 {
 	if (opcode < ARRAY_SIZE(nvme_admin_ops) && nvme_admin_ops[opcode])
 		return nvme_admin_ops[opcode];
@@ -195,7 +195,7 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
 }
 EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) {
+const char *nvme_get_fabrics_opcode_str(u8 opcode) {
 	if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
 		return nvme_fabrics_ops[opcode];
 	return "Unknown";
......
@@ -338,6 +338,30 @@ static void nvme_log_error(struct request *req)
 		nr->status & NVME_SC_DNR ? "DNR " : "");
 }
+static void nvme_log_err_passthru(struct request *req)
+{
+	struct nvme_ns *ns = req->q->queuedata;
+	struct nvme_request *nr = nvme_req(req);
+	pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
+		"cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
+		ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
+		ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
+		     nvme_get_admin_opcode_str(nr->cmd->common.opcode),
+		nr->cmd->common.opcode,
+		nvme_get_error_status_str(nr->status),
+		nr->status >> 8 & 7,	/* Status Code Type */
+		nr->status & 0xff,	/* Status Code */
+		nr->status & NVME_SC_MORE ? "MORE " : "",
+		nr->status & NVME_SC_DNR ? "DNR " : "",
+		nr->cmd->common.cdw10,
+		nr->cmd->common.cdw11,
+		nr->cmd->common.cdw12,
+		nr->cmd->common.cdw13,
+		nr->cmd->common.cdw14,
+		nr->cmd->common.cdw14);
+}
 enum nvme_disposition {
 	COMPLETE,
 	RETRY,
@@ -385,8 +409,12 @@ static inline void nvme_end_req(struct request *req)
 {
 	blk_status_t status = nvme_error_status(nvme_req(req)->status);
-	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
-		nvme_log_error(req);
+	if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+		if (blk_rq_is_passthrough(req))
+			nvme_log_err_passthru(req);
+		else
+			nvme_log_error(req);
+	}
 	nvme_end_req_zoned(req);
 	nvme_trace_bio_complete(req);
 	if (req->cmd_flags & REQ_NVME_MPATH)
@@ -679,10 +707,21 @@ static inline void nvme_clear_nvme_request(struct request *req)
 /* initialize a passthrough request */
 void nvme_init_request(struct request *req, struct nvme_command *cmd)
 {
-	if (req->q->queuedata)
+	struct nvme_request *nr = nvme_req(req);
+	bool logging_enabled;
+	if (req->q->queuedata) {
+		struct nvme_ns *ns = req->q->disk->private_data;
+		logging_enabled = ns->passthru_err_log_enabled;
 		req->timeout = NVME_IO_TIMEOUT;
-	else /* no queuedata implies admin queue */
+	} else { /* no queuedata implies admin queue */
+		logging_enabled = nr->ctrl->passthru_err_log_enabled;
 		req->timeout = NVME_ADMIN_TIMEOUT;
+	}
+	if (!logging_enabled)
+		req->rq_flags |= RQF_QUIET;
 	/* passthru commands should let the driver set the SGL flags */
 	cmd->common.flags &= ~NVME_CMD_SGL_ALL;
@@ -691,8 +730,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
 	if (req->mq_hctx->type == HCTX_TYPE_POLL)
 		req->cmd_flags |= REQ_POLLED;
 	nvme_clear_nvme_request(req);
-	req->rq_flags |= RQF_QUIET;
-	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
+	memcpy(nr->cmd, cmd, sizeof(*cmd));
 }
 EXPORT_SYMBOL_GPL(nvme_init_request);
@@ -721,7 +759,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
 EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
-		bool queue_live)
+		bool queue_live, enum nvme_ctrl_state state)
 {
 	struct nvme_request *req = nvme_req(rq);
@@ -742,7 +780,7 @@ bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 	 * command, which is require to set the queue live in the
 	 * appropinquate states.
 	 */
-	switch (nvme_ctrl_state(ctrl)) {
+	switch (state) {
 	case NVME_CTRL_CONNECTING:
 		if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
 		    (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
@@ -1051,20 +1089,27 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		union nvme_result *result, void *buffer, unsigned bufflen,
-		int qid, int at_head, blk_mq_req_flags_t flags)
+		int qid, nvme_submit_flags_t flags)
 {
 	struct request *req;
 	int ret;
+	blk_mq_req_flags_t blk_flags = 0;
+	if (flags & NVME_SUBMIT_NOWAIT)
+		blk_flags |= BLK_MQ_REQ_NOWAIT;
+	if (flags & NVME_SUBMIT_RESERVED)
+		blk_flags |= BLK_MQ_REQ_RESERVED;
 	if (qid == NVME_QID_ANY)
-		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+		req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
 	else
-		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
+		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
 				qid - 1);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 	nvme_init_request(req, cmd);
+	if (flags & NVME_SUBMIT_RETRY)
+		req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
 	if (buffer && bufflen) {
 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -1072,7 +1117,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 			goto out;
 	}
-	ret = nvme_execute_rq(req, at_head);
+	ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
 	if (result && ret >= 0)
 		*result = nvme_req(req)->result;
 out:
@@ -1085,7 +1130,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buffer, unsigned bufflen)
 {
 	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
-			NVME_QID_ANY, 0, 0);
+			NVME_QID_ANY, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1560,7 +1605,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
 	c.features.dword11 = cpu_to_le32(dword11);
 	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
-			buffer, buflen, NVME_QID_ANY, 0, 0);
+			buffer, buflen, NVME_QID_ANY, 0);
 	if (ret >= 0 && result)
 		*result = le32_to_cpu(res.u32);
 	return ret;
@@ -2172,7 +2217,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
 	cmd.common.cdw11 = cpu_to_le32(len);
 	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
-			NVME_QID_ANY, 1, 0);
+			NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
 }
 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
@@ -3651,6 +3696,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
 	ns->disk = disk;
 	ns->queue = disk->queue;
+	ns->passthru_err_log_enabled = false;
 	if (ctrl->opts && ctrl->opts->data_digest)
 		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
@@ -3714,6 +3760,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
 	nvme_mpath_add_disk(ns, info->anagrpid);
 	nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
+	/*
+	 * Set ns->disk->device->driver_data to ns so we can access
+	 * ns->logging_enabled in nvme_passthru_err_log_enabled_store() and
+	 * nvme_passthru_err_log_enabled_show().
+	 */
+	dev_set_drvdata(disk_to_dev(ns->disk), ns);
 	return;
 out_cleanup_ns_from_list:
@@ -4514,6 +4567,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 	int ret;
 	WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
+	ctrl->passthru_err_log_enabled = false;
 	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
 	spin_lock_init(&ctrl->lock);
 	mutex_init(&ctrl->scan_lock);
@@ -4851,5 +4905,6 @@ static void __exit nvme_core_exit(void)
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host core framework");
 module_init(nvme_core_init);
 module_exit(nvme_core_exit);
@@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 	cmd.prop_get.offset = cpu_to_le32(off);
 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
-			NVME_QID_ANY, 0, 0);
+			NVME_QID_ANY, 0);
 	if (ret >= 0)
 		*val = le64_to_cpu(res.u64);
@@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	cmd.prop_get.offset = cpu_to_le32(off);
 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
-			NVME_QID_ANY, 0, 0);
+			NVME_QID_ANY, 0);
 	if (ret >= 0)
 		*val = le64_to_cpu(res.u64);
@@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 	cmd.prop_set.value = cpu_to_le64(val);
 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
-			NVME_QID_ANY, 0, 0);
+			NVME_QID_ANY, 0);
 	if (unlikely(ret))
 		dev_err(ctrl->device,
 			"Property Set error: %d, offset %#x\n",
@@ -450,8 +450,10 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 		return -ENOMEM;
 	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
-			data, sizeof(*data), NVME_QID_ANY, 1,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+			data, sizeof(*data), NVME_QID_ANY,
+			NVME_SUBMIT_AT_HEAD |
+			NVME_SUBMIT_NOWAIT |
+			NVME_SUBMIT_RESERVED);
 	if (ret) {
 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				&cmd, data);
@@ -525,8 +527,10 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 		return -ENOMEM;
 	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
-			data, sizeof(*data), qid, 1,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+			data, sizeof(*data), qid,
+			NVME_SUBMIT_AT_HEAD |
+			NVME_SUBMIT_RESERVED |
+			NVME_SUBMIT_NOWAIT);
 	if (ret) {
 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				&cmd, data);
@@ -1488,6 +1492,7 @@ static void __exit nvmf_exit(void)
 }
 MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NVMe host fabrics library");
 module_init(nvmf_init);
 module_exit(nvmf_exit);
@@ -185,9 +185,11 @@ static inline bool
 nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
 			struct nvmf_ctrl_options *opts)
 {
-	if (ctrl->state == NVME_CTRL_DELETING ||
-	    ctrl->state == NVME_CTRL_DELETING_NOIO ||
-	    ctrl->state == NVME_CTRL_DEAD ||
+	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+	if (state == NVME_CTRL_DELETING ||
+	    state == NVME_CTRL_DELETING_NOIO ||
+	    state == NVME_CTRL_DEAD ||
 	    strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
 	    strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
 	    !uuid_equal(&opts->host->id, &ctrl->opts->host->id))
......
@@ -221,11 +221,6 @@ static LIST_HEAD(nvme_fc_lport_list);
 static DEFINE_IDA(nvme_fc_local_port_cnt);
 static DEFINE_IDA(nvme_fc_ctrl_cnt);
-static struct workqueue_struct *nvme_fc_wq;
-static bool nvme_fc_waiting_to_unload;
-static DECLARE_COMPLETION(nvme_fc_unload_proceed);
 /*
  * These items are short-term. They will eventually be moved into
  * a generic FC class. See comments in module init.
@@ -255,8 +250,6 @@ nvme_fc_free_lport(struct kref *ref)
 	/* remove from transport list */
 	spin_lock_irqsave(&nvme_fc_lock, flags);
 	list_del(&lport->port_list);
-	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
-		complete(&nvme_fc_unload_proceed);
 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
 	ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
@@ -2574,6 +2567,7 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_ctrl *ctrl = op->ctrl;
+	u16 qnum = op->queue->qnum;
 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
 	struct nvme_command *sqe = &cmdiu->sqe;
@@ -2582,10 +2576,11 @@ static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
 	 * will detect the aborted io and will fail the connection.
 	 */
 	dev_info(ctrl->ctrl.device,
-		"NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
+		"NVME-FC{%d.%d}: io timeout: opcode %d fctype %d (%s) w10/11: "
 		"x%08x/x%08x\n",
-		ctrl->cnum, op->queue->qnum, sqe->common.opcode,
-		sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
+		ctrl->cnum, qnum, sqe->common.opcode, sqe->fabrics.fctype,
+		nvme_fabrics_opcode_str(qnum, sqe),
+		sqe->common.cdw10, sqe->common.cdw11);
 	if (__nvme_fc_abort_op(ctrl, op))
 		nvme_fc_error_recovery(ctrl, "io timeout abort failed");
@@ -3575,8 +3570,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	flush_delayed_work(&ctrl->connect_work);
 	dev_info(ctrl->ctrl.device,
-		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
-		ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
+		"NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n",
+		ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn);
 	return &ctrl->ctrl;
@@ -3894,10 +3889,6 @@ static int __init nvme_fc_init_module(void)
 {
 	int ret;
-	nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
-	if (!nvme_fc_wq)
-		return -ENOMEM;
 	/*
 	 * NOTE:
 	 * It is expected that in the future the kernel will combine
@@ -3915,7 +3906,7 @@ static int __init nvme_fc_init_module(void)
 	ret = class_register(&fc_class);
 	if (ret) {
 		pr_err("couldn't register class fc\n");
-		goto out_destroy_wq;
+		return ret;
 	}
 	/*
@@ -3939,8 +3930,6 @@ static int __init nvme_fc_init_module(void)
 	device_destroy(&fc_class, MKDEV(0, 0));
 out_destroy_class:
 	class_unregister(&fc_class);
-out_destroy_wq:
-	destroy_workqueue(nvme_fc_wq);
 	return ret;
 }
@@ -3960,48 +3949,27 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
 	spin_unlock(&rport->lock);
 }
-static void
-nvme_fc_cleanup_for_unload(void)
+static void __exit nvme_fc_exit_module(void)
 {
 	struct nvme_fc_lport *lport;
 	struct nvme_fc_rport *rport;
-	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
-		list_for_each_entry(rport, &lport->endp_list, endp_list) {
-			nvme_fc_delete_controllers(rport);
-		}
-	}
-}
-static void __exit nvme_fc_exit_module(void)
-{
 	unsigned long flags;
-	bool need_cleanup = false;
 	spin_lock_irqsave(&nvme_fc_lock, flags);
-	nvme_fc_waiting_to_unload = true;
-	if (!list_empty(&nvme_fc_lport_list)) {
-		need_cleanup = true;
-		nvme_fc_cleanup_for_unload();
-	}
+	list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
+		list_for_each_entry(rport, &lport->endp_list, endp_list)
+			nvme_fc_delete_controllers(rport);
 	spin_unlock_irqrestore(&nvme_fc_lock, flags);
-	if (need_cleanup) {
-		pr_info("%s: waiting for ctlr deletes\n", __func__);
-		wait_for_completion(&nvme_fc_unload_proceed);
-		pr_info("%s: ctrl deletes complete\n", __func__);
-	}
+	flush_workqueue(nvme_delete_wq);
 	nvmf_unregister_transport(&nvme_fc_transport);
-	ida_destroy(&nvme_fc_local_port_cnt);
-	ida_destroy(&nvme_fc_ctrl_cnt);
 	device_destroy(&fc_class, MKDEV(0, 0));
 	class_unregister(&fc_class);
-	destroy_workqueue(nvme_fc_wq);
 }
 module_init(nvme_fc_init_module);
 module_exit(nvme_fc_exit_module);
+MODULE_DESCRIPTION("NVMe host FC transport driver");
 MODULE_LICENSE("GPL v2");
@@ -156,7 +156,7 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
 		if (!ns->head->disk)
 			continue;
 		kblockd_schedule_work(&ns->head->requeue_work);
-		if (ctrl->state == NVME_CTRL_LIVE)
+		if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
 			disk_uevent(ns->head->disk, KOBJ_CHANGE);
 	}
 	up_read(&ctrl->namespaces_rwsem);
@@ -223,13 +223,14 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
 static bool nvme_path_is_disabled(struct nvme_ns *ns)
 {
+	enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl);
 	/*
 	 * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
 	 * still be able to complete assuming that the controller is connected.
 	 * Otherwise it will fail immediately and return to the requeue list.
 	 */
-	if (ns->ctrl->state != NVME_CTRL_LIVE &&
-	    ns->ctrl->state != NVME_CTRL_DELETING)
+	if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING)
 		return true;
 	if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
 	    !test_bit(NVME_NS_READY, &ns->flags))
@@ -331,7 +332,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
 static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
 {
-	return ns->ctrl->state == NVME_CTRL_LIVE &&
+	return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
 		ns->ana_state == NVME_ANA_OPTIMIZED;
 }
@@ -358,7 +359,7 @@ static bool nvme_available_path(struct nvme_ns_head *head)
 	list_for_each_entry_rcu(ns, &head->list, siblings) {
 		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
 			continue;
-		switch (ns->ctrl->state) {
+		switch (nvme_ctrl_state(ns->ctrl)) {
 		case NVME_CTRL_LIVE:
 		case NVME_CTRL_RESETTING:
 		case NVME_CTRL_CONNECTING:
@@ -667,7 +668,7 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
 	 * controller is ready.
 	 */
 	if (nvme_state_is_live(ns->ana_state) &&
-	    ns->ctrl->state == NVME_CTRL_LIVE)
+	    nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
 		nvme_mpath_set_live(ns);
 }
@@ -748,7 +749,7 @@ static void nvme_ana_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
-	if (ctrl->state != NVME_CTRL_LIVE)
+	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
 		return;
 	nvme_read_ana_log(ctrl);
......
@@ -263,6 +263,7 @@ enum nvme_ctrl_flags {
 struct nvme_ctrl {
 	bool comp_seen;
 	bool identified;
+	bool passthru_err_log_enabled;
 	enum nvme_ctrl_state state;
 	spinlock_t lock;
 	struct mutex scan_lock;
@@ -522,7 +523,7 @@ struct nvme_ns {
 	struct device cdev_device;
 	struct nvme_fault_inject fault_inject;
+	bool passthru_err_log_enabled;
 };
 /* NVMe ns supports metadata actions by the controller (generate/strip) */
@@ -805,17 +806,18 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
 blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
 		struct request *req);
 bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
-		bool queue_live);
+		bool queue_live, enum nvme_ctrl_state state);
 static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		bool queue_live)
 {
-	if (likely(ctrl->state == NVME_CTRL_LIVE))
+	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+	if (likely(state == NVME_CTRL_LIVE))
 		return true;
-	if (ctrl->ops->flags & NVME_F_FABRICS &&
-	    ctrl->state == NVME_CTRL_DELETING)
+	if (ctrl->ops->flags & NVME_F_FABRICS && state == NVME_CTRL_DELETING)
 		return queue_live;
-	return __nvme_check_ready(ctrl, rq, queue_live);
+	return __nvme_check_ready(ctrl, rq, queue_live, state);
 }
 /*
@@ -836,12 +838,27 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
 		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
 }
+/*
+ * Flags for __nvme_submit_sync_cmd()
+ */
+typedef __u32 __bitwise nvme_submit_flags_t;
+enum {
+	/* Insert request at the head of the queue */
+	NVME_SUBMIT_AT_HEAD = (__force nvme_submit_flags_t)(1 << 0),
+	/* Set BLK_MQ_REQ_NOWAIT when allocating request */
+	NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
+	/* Set BLK_MQ_REQ_RESERVED when allocating request */
+	NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
+	/* Retry command when NVME_SC_DNR is not set in the result */
+	NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
+};
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		union nvme_result *result, void *buffer, unsigned bufflen,
-		int qid, int at_head,
-		blk_mq_req_flags_t flags);
+		int qid, nvme_submit_flags_t flags);
 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
 		unsigned int dword11, void *buffer, size_t buflen,
 		u32 *result);
@@ -1124,35 +1141,42 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
 }
 #ifdef CONFIG_NVME_VERBOSE_ERRORS
-const unsigned char *nvme_get_error_status_str(u16 status);
-const unsigned char *nvme_get_opcode_str(u8 opcode);
-const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
-const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
+const char *nvme_get_error_status_str(u16 status);
+const char *nvme_get_opcode_str(u8 opcode);
+const char *nvme_get_admin_opcode_str(u8 opcode);
+const char *nvme_get_fabrics_opcode_str(u8 opcode);
 #else /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_get_error_status_str(u16 status)
+static inline const char *nvme_get_error_status_str(u16 status)
 {
 	return "I/O Error";
 }
-static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
+static inline const char *nvme_get_opcode_str(u8 opcode)
 {
 	return "I/O Cmd";
 }
-static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
+static inline const char *nvme_get_admin_opcode_str(u8 opcode)
 {
 	return "Admin Cmd";
 }
-static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
 {
 	return "Fabrics Cmd";
 }
 #endif /* CONFIG_NVME_VERBOSE_ERRORS */
-static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+static inline const char *nvme_opcode_str(int qid, u8 opcode)
 {
-	if (opcode == nvme_fabrics_command)
-		return nvme_get_fabrics_opcode_str(fctype);
 	return qid ? nvme_get_opcode_str(opcode) :
 		nvme_get_admin_opcode_str(opcode);
 }
+static inline const char *nvme_fabrics_opcode_str(
+		int qid, const struct nvme_command *cmd)
+{
+	if (nvme_is_fabrics(cmd))
+		return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
+	return nvme_opcode_str(qid, cmd->common.opcode);
+}
 #endif /* _NVME_H */
@@ -1349,7 +1349,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 		dev_warn(dev->ctrl.device,
 			"I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
 			req->tag, nvme_cid(req), opcode,
-			nvme_opcode_str(nvmeq->qid, opcode, 0), nvmeq->qid);
+			nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
 		goto disable;
 	}
@@ -3543,5 +3543,6 @@ static void __exit nvme_exit(void)
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("NVMe host PCIe transport driver");
 module_init(nvme_init);
 module_exit(nvme_exit);
@@ -1410,6 +1410,8 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
 	struct nvme_ns *ns = rq->q->queuedata;
 	struct bio *bio = rq->bio;
 	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+	u32 xfer_len;
 	int nr;
 	req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
@@ -1422,8 +1424,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
 	if (unlikely(nr))
 		goto mr_put;
-	nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
-			req->mr->sig_attrs, ns->head->pi_type);
+	nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type);
 	nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
 	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
@@ -1441,7 +1442,11 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
 			IB_ACCESS_REMOTE_WRITE;
 	sg->addr = cpu_to_le64(req->mr->iova);
-	put_unaligned_le24(req->mr->length, sg->length);
+	xfer_len = req->mr->length;
+	/* Check if PI is added by the HW */
+	if (!pi_count)
+		xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size;
+	put_unaligned_le24(xfer_len, sg->length);
 	put_unaligned_le32(req->mr->rkey, sg->key);
 	sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
@@ -1946,14 +1951,13 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_rdma_queue *queue = req->queue;
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-	u8 opcode = req->req.cmd->common.opcode;
-	u8 fctype = req->req.cmd->fabrics.fctype;
+	struct nvme_command *cmd = req->req.cmd;
 	int qid = nvme_rdma_queue_idx(queue);
 	dev_warn(ctrl->ctrl.device,
 		"I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
-		rq->tag, nvme_cid(rq), opcode,
-		nvme_opcode_str(qid, opcode, fctype), qid);
+		rq->tag, nvme_cid(rq), cmd->common.opcode,
+		nvme_fabrics_opcode_str(qid, cmd), qid);
 	if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
 		/*
@@ -2296,8 +2300,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	if (ret)
 		goto out_uninit_ctrl;
-	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
-		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n",
+		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
 	mutex_lock(&nvme_rdma_ctrl_mutex);
 	list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
@@ -2400,4 +2404,5 @@ static void __exit nvme_rdma_cleanup_module(void)
 module_init(nvme_rdma_init_module);
 module_exit(nvme_rdma_cleanup_module);
+MODULE_DESCRIPTION("NVMe host RDMA transport driver");
 MODULE_LICENSE("GPL v2");
@@ -35,6 +35,62 @@ static ssize_t nvme_sysfs_rescan(struct device *dev,
 }
 static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+	return sysfs_emit(buf,
+			  ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+	int err;
+	bool passthru_err_log_enabled;
+	err = kstrtobool(buf, &passthru_err_log_enabled);
+	if (err)
+		return -EINVAL;
+	ctrl->passthru_err_log_enabled = passthru_err_log_enabled;
+	return count;
+}
+static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvme_ns *n = dev_get_drvdata(dev);
+	return sysfs_emit(buf, n->passthru_err_log_enabled ? "on\n" : "off\n");
+}
+static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct nvme_ns *ns = dev_get_drvdata(dev);
+	int err;
+	bool passthru_err_log_enabled;
+	err = kstrtobool(buf, &passthru_err_log_enabled);
+	if (err)
+		return -EINVAL;
+	ns->passthru_err_log_enabled = passthru_err_log_enabled;
+	return count;
+}
+static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
+	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
+	nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);
+static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
+	__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
+	nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);
 static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
 {
 	struct gendisk *disk = dev_to_disk(dev);
@@ -208,6 +264,7 @@ static struct attribute *nvme_ns_attrs[] = {
 	&dev_attr_ana_grpid.attr,
 	&dev_attr_ana_state.attr,
 #endif
+	&dev_attr_io_passthru_err_log_enabled.attr,
 	NULL,
 };
@@ -311,6 +368,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
 		char *buf)
 {
 	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+	unsigned state = (unsigned)nvme_ctrl_state(ctrl);
 	static const char *const state_name[] = {
 		[NVME_CTRL_NEW] = "new",
 		[NVME_CTRL_LIVE] = "live",
@@ -321,9 +379,8 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
 		[NVME_CTRL_DEAD] = "dead",
 	};
-	if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
-	    state_name[ctrl->state])
-		return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);
+	if (state < ARRAY_SIZE(state_name) && state_name[state])
+		return sysfs_emit(buf, "%s\n", state_name[state]);
 	return sysfs_emit(buf, "unknown state\n");
 }
@@ -655,6 +712,7 @@ static struct attribute *nvme_dev_attrs[] = {
 #ifdef CONFIG_NVME_TCP_TLS
 	&dev_attr_tls_key.attr,
 #endif
+	&dev_attr_adm_passthru_err_log_enabled.attr,
 	NULL
 };
......
@@ -2428,13 +2428,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
 	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
-	u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
+	struct nvme_command *cmd = &pdu->cmd;
 	int qid = nvme_tcp_queue_id(req->queue);
 	dev_warn(ctrl->device,
 		"I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
-		rq->tag, nvme_cid(rq), pdu->hdr.type, opc,
-		nvme_opcode_str(qid, opc, fctype), qid);
+		rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
+		nvme_fabrics_opcode_str(qid, cmd), qid);
 	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
 		/*
@@ -2753,8 +2753,8 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 	if (ret)
 		goto out_uninit_ctrl;
-	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
-		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
+	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n",
+		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
 	mutex_lock(&nvme_tcp_ctrl_mutex);
 	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
@@ -2826,4 +2826,5 @@ static void __exit nvme_tcp_cleanup_module(void)
 module_init(nvme_tcp_init_module);
 module_exit(nvme_tcp_cleanup_module);
+MODULE_DESCRIPTION("NVMe host TCP transport driver");
 MODULE_LICENSE("GPL v2");
@@ -248,7 +248,7 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
 		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 			continue;
-		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
 				NVME_AER_NOTICE_NS_CHANGED,
 				NVME_LOG_CHANGED_NS);
 	}
@@ -265,7 +265,7 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys,
 			continue;
 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
 			continue;
-		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
 				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
 	}
 	mutex_unlock(&subsys->lock);
@@ -1705,4 +1705,5 @@ static void __exit nvmet_exit(void)
 module_init(nvmet_init);
 module_exit(nvmet_exit);
+MODULE_DESCRIPTION("NVMe target core framework");
 MODULE_LICENSE("GPL v2");
@@ -21,7 +21,7 @@ static void __nvmet_disc_changed(struct nvmet_port *port,
 	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
 		return;
-	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+	nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
 			NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
 }
......
This diff is collapsed.
@@ -358,7 +358,7 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
 	if (!rport->targetport) {
 		tls_req->status = -ECONNREFUSED;
 		spin_lock(&rport->lock);
-		list_add_tail(&rport->ls_list, &tls_req->ls_list);
+		list_add_tail(&tls_req->ls_list, &rport->ls_list);
 		spin_unlock(&rport->lock);
 		queue_work(nvmet_wq, &rport->ls_work);
 		return ret;
@@ -391,7 +391,7 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
 	if (remoteport) {
 		rport = remoteport->private;
 		spin_lock(&rport->lock);
-		list_add_tail(&rport->ls_list, &tls_req->ls_list);
+		list_add_tail(&tls_req->ls_list, &rport->ls_list);
 		spin_unlock(&rport->lock);
 		queue_work(nvmet_wq, &rport->ls_work);
 	}
@@ -446,7 +446,7 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
 	if (!tport->remoteport) {
 		tls_req->status = -ECONNREFUSED;
 		spin_lock(&tport->lock);
-		list_add_tail(&tport->ls_list, &tls_req->ls_list);
+		list_add_tail(&tls_req->ls_list, &tport->ls_list);
 		spin_unlock(&tport->lock);
 		queue_work(nvmet_wq, &tport->ls_work);
 		return ret;
@@ -1650,4 +1650,5 @@ static void __exit fcloop_exit(void)
 module_init(fcloop_init);
 module_exit(fcloop_exit);
+MODULE_DESCRIPTION("NVMe target FC loop transport driver");
 MODULE_LICENSE("GPL v2");
@@ -400,7 +400,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 	}
 	nvme_quiesce_admin_queue(&ctrl->ctrl);
-	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+	if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
 		nvme_disable_ctrl(&ctrl->ctrl, true);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
@@ -434,8 +434,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	nvme_loop_shutdown_ctrl(ctrl);
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
-		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
-		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+		enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+		if (state != NVME_CTRL_DELETING &&
+		    state != NVME_CTRL_DELETING_NOIO)
 			/* state change failure for non-deleted ctrl? */
 			WARN_ON_ONCE(1);
 		return;
@@ -688,5 +690,6 @@ static void __exit nvme_loop_cleanup_module(void)
 module_init(nvme_loop_init_module);
 module_exit(nvme_loop_cleanup_module);
+MODULE_DESCRIPTION("NVMe target loop transport driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
@@ -2104,5 +2104,6 @@ static void __exit nvmet_rdma_exit(void)
 module_init(nvmet_rdma_init);
 module_exit(nvmet_rdma_exit);
+MODULE_DESCRIPTION("NVMe target RDMA transport driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
@@ -2216,10 +2216,12 @@ static void __exit nvmet_tcp_exit(void)
 	flush_workqueue(nvmet_wq);
 	destroy_workqueue(nvmet_tcp_wq);
+	ida_destroy(&nvmet_tcp_queue_ida);
 }
 module_init(nvmet_tcp_init);
 module_exit(nvmet_tcp_exit);
+MODULE_DESCRIPTION("NVMe target TCP transport driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
@@ -816,12 +816,6 @@ struct nvme_reservation_status_ext {
 	struct nvme_registered_ctrl_ext regctl_eds[];
 };
-enum nvme_async_event_type {
-	NVME_AER_TYPE_ERROR = 0,
-	NVME_AER_TYPE_SMART = 1,
-	NVME_AER_TYPE_NOTICE = 2,
-};
 /* I/O commands */
 enum nvme_opcode {
@@ -1818,7 +1812,7 @@ struct nvme_command {
 	};
 };
-static inline bool nvme_is_fabrics(struct nvme_command *cmd)
+static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
 {
 	return cmd->common.opcode == nvme_fabrics_command;
 }
@@ -1837,7 +1831,7 @@ struct nvme_error_slot {
 	__u8 resv2[24];
 };
-static inline bool nvme_is_write(struct nvme_command *cmd)
+static inline bool nvme_is_write(const struct nvme_command *cmd)
 {
 	/*
 	 * What a mess...
......