Commit 21fa1004 authored by Jens Axboe

Merge branch 'nvme-5.4' of git://git.infradead.org/nvme into for-5.4/block

Pull NVMe updates from Sagi:

"Highlights includes:
 - controller reset and namespace scan races fixes
 - nvme discovery log change uevent support
 - naming improvements from Keith
 - multiple discovery controllers reject fix from James
 - some regular cleanups from various people"

* 'nvme-5.4' of git://git.infradead.org/nvme:
  nvmet: fix a wrong error status returned in error log page
  nvme: send discovery log page change events to userspace
  nvme: add uevent variables for controller devices
  nvme: enable aen regardless of the presence of I/O queues
  nvme-fabrics: allow discovery subsystems accept a kato
  nvmet: Use PTR_ERR_OR_ZERO() in nvmet_init_discovery()
  nvme: Remove redundant assignment of cq vector
  nvme: Assign subsys instance from first ctrl
  nvme: tcp: remove redundant assignment to variable ret
  nvme: include admin_q sync with nvme_sync_queues
  nvme: Treat discovery subsystems as unique subsystems
  nvme: fix ns removal hang when failing to revalidate due to a transient error
  nvme: make nvme_report_ns_ids propagate error back
  nvme: make nvme_identify_ns propagate errors back
  nvme: pass status to nvme_error_status
  nvme-fc: Fail transport errors with NVME_SC_HOST_PATH
  nvme-tcp: fail command with NVME_SC_HOST_PATH_ERROR send failed
  nvme: fail cancelled commands with NVME_SC_HOST_PATH_ERROR
Parents: 0a67b5a9 5f8badbc
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -381,8 +381,8 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	 * Set keep-alive timeout in seconds granularity (ms * 1000)
 	 * and add a grace period for controller kato enforcement
 	 */
-	cmd.connect.kato = ctrl->opts->discovery_nqn ? 0 :
-		cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000);
+	cmd.connect.kato = ctrl->kato ?
+		cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000) : 0;
 
 	if (ctrl->opts->disable_sqflow)
 		cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
@@ -740,13 +740,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
 			}
 			opts->kato = token;
-
-			if (opts->discovery_nqn && opts->kato) {
-				pr_err("Discovery controllers cannot accept KATO != 0\n");
-				ret = -EINVAL;
-				goto out;
-			}
-
 			break;
 		case NVMF_OPT_CTRL_LOSS_TMO:
 			if (match_int(args, &token)) {
@@ -883,7 +876,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	}
 
 	if (opts->discovery_nqn) {
-		opts->kato = 0;
 		opts->nr_io_queues = 0;
 		opts->nr_write_queues = 0;
 		opts->nr_poll_queues = 0;
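
Taken together, the fabrics.c hunks drop the old "discovery controllers cannot accept KATO != 0" policy: the option parser no longer rejects or zeroes a keep-alive timeout for discovery subsystems, and nvmf_connect_admin_queue() now derives the connect command's kato field from ctrl->kato alone. This lets a host keep a persistent connection to a discovery controller, which pairs with the discovery log change events added elsewhere in this series. A minimal standalone sketch of the new kato computation (not the kernel code): cpu_to_le32() is stubbed out for a little-endian host, and NVME_KATO_GRACE is 10 in the kernel headers of this era.

#include <stdint.h>
#include <stdio.h>

#define NVME_KATO_GRACE 10	/* grace period in seconds, as in the kernel */

/* Stand-in for the kernel's cpu_to_le32() on a little-endian host. */
static uint32_t cpu_to_le32(uint32_t v) { return v; }

/*
 * New policy: the connect kato follows the controller's kato directly.
 * A kato of 0 (keep-alive disabled) is sent as 0; anything else is sent
 * in milliseconds with the grace period added. Whether the target is a
 * discovery subsystem no longer matters here.
 */
static uint32_t connect_kato(uint32_t kato_seconds)
{
	return kato_seconds ?
		cpu_to_le32((kato_seconds + NVME_KATO_GRACE) * 1000) : 0;
}

int main(void)
{
	printf("kato=0  -> %u\n", connect_kato(0));	/* 0: keep-alive off */
	printf("kato=30 -> %u\n", connect_kato(30));	/* 40000 ms on the wire */
	return 0;
}
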
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1608,9 +1608,13 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
 
 	if (opstate == FCPOP_STATE_ABORTED)
-		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
-	else if (freq->status)
-		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
+		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+	else if (freq->status) {
+		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+		dev_info(ctrl->ctrl.device,
+			"NVME-FC{%d}: io failed due to lldd error %d\n",
+			ctrl->cnum, freq->status);
+	}
 
 	/*
 	 * For the linux implementation, if we have an unsuccesful
@@ -1637,8 +1641,13 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 		 * no payload in the CQE by the transport.
 		 */
 		if (freq->transferred_length !=
 				be32_to_cpu(op->cmd_iu.data_len)) {
-			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
+			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+			dev_info(ctrl->ctrl.device,
+				"NVME-FC{%d}: io failed due to bad transfer "
+				"length: %d vs expected %d\n",
+				ctrl->cnum, freq->transferred_length,
+				be32_to_cpu(op->cmd_iu.data_len));
 			goto done;
 		}
 		result.u64 = 0;
@@ -1655,7 +1664,17 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 					freq->transferred_length ||
 			     op->rsp_iu.status_code ||
 			     sqe->common.command_id != cqe->command_id)) {
-			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
+			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+			dev_info(ctrl->ctrl.device,
+				"NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
+				"iu len %d, xfr len %d vs %d, status code "
+				"%d, cmdid %d vs %d\n",
+				ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
+				be32_to_cpu(op->rsp_iu.xfrd_len),
+				freq->transferred_length,
+				op->rsp_iu.status_code,
+				sqe->common.command_id,
+				cqe->command_id);
 			goto done;
 		}
 		result = cqe->result;
@@ -1663,7 +1682,11 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 		break;
 
 	default:
-		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
+		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+		dev_info(ctrl->ctrl.device,
+			"NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
+			"len %d\n",
+			ctrl->cnum, freq->rcv_rsplen);
 		goto done;
 	}
 
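
All of the fc.c failure paths above now complete with NVME_SC_HOST_PATH_ERROR rather than NVME_SC_INTERNAL or NVME_SC_ABORT_REQ, and each path logs why. Reporting a host-path status for transport-level failures lets upper layers (e.g. multipath failover) tell "the fabric lost the command" apart from "the device rejected the command". The recurring `<< 1` reflects the CQE layout: the 16-bit status field carries the phase tag in bit 0, so status codes live in bits 15:1 and are stored pre-shifted. A standalone sketch of that convention (not kernel code; the 0x370 value mirrors NVME_SC_HOST_PATH_ERROR in include/linux/nvme.h):

#include <stdint.h>
#include <stdio.h>

/* Value as defined in include/linux/nvme.h (path-related status type). */
#define NVME_SC_HOST_PATH_ERROR	0x370

/*
 * In an NVMe completion queue entry the 16-bit status field keeps the
 * phase tag in bit 0, so the status code proper occupies bits 15:1.
 * That is why the driver stores codes pre-shifted: (code << 1).
 */
static uint16_t cqe_status(uint16_t sc, int phase)
{
	return (uint16_t)((sc << 1) | (phase & 1));
}

int main(void)
{
	uint16_t st = cqe_status(NVME_SC_HOST_PATH_ERROR, 1);

	printf("raw status field: 0x%04x\n", st);
	printf("status code:      0x%03x\n", st >> 1);	/* back to 0x370 */
	return 0;
}
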
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1555,7 +1555,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
 	nvme_init_queue(nvmeq, qid);
 
 	if (!polled) {
-		nvmeq->cq_vector = vector;
 		result = queue_request_irq(nvmeq);
 		if (result < 0)
 			goto release_sq;
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -842,7 +842,7 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 
 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 {
-	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
+	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
 }
 
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
@@ -1824,7 +1824,7 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
 {
 	struct nvmf_ctrl_options *opts = ctrl->opts;
-	int ret = -EINVAL;
+	int ret;
 
 	ret = nvme_tcp_configure_admin_queue(ctrl, new);
 	if (ret)
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -37,7 +37,6 @@ static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
-	u16 status = NVME_SC_SUCCESS;
 	unsigned long flags;
 	off_t offset = 0;
 	u64 slot;
@@ -47,9 +46,8 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
 	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
 
 	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
-		status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
-				sizeof(struct nvme_error_slot));
-		if (status)
+		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
+				sizeof(struct nvme_error_slot)))
 			break;
 
 		if (slot == 0)
@@ -59,7 +57,7 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
 		offset += sizeof(struct nvme_error_slot);
 	}
 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
-	nvmet_req_complete(req, status);
+	nvmet_req_complete(req, 0);
 }
 
 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
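
The admin-cmd.c fix above makes the error log handler best-effort: if the host's buffer cannot hold all NVMET_ERROR_LOG_SLOTS entries, the loop stops at the last entry that fits and the command still completes with status 0, instead of propagating the SGL copy failure as the command status. A standalone sketch of that control flow, with hypothetical names (copy_error_log, ERROR_LOG_SLOTS, struct error_slot) standing in for the kernel ones:

#include <string.h>

#define ERROR_LOG_SLOTS	128		/* hypothetical fixed slot count */

struct error_slot { unsigned long long error_count; /* ... */ };

/*
 * Best-effort copy: fill the caller's buffer with as many log entries
 * as fit, stop quietly when it is full, and report success either way.
 * This mirrors the control flow the fix gives
 * nvmet_execute_get_log_page_error().
 */
static int copy_error_log(const struct error_slot *slots,
			  void *buf, size_t buf_len)
{
	size_t offset = 0;
	int i;

	for (i = 0; i < ERROR_LOG_SLOTS; i++) {
		if (offset + sizeof(*slots) > buf_len)
			break;			/* buffer full: not an error */
		memcpy((char *)buf + offset, &slots[i], sizeof(*slots));
		offset += sizeof(*slots);
	}
	return 0;				/* always success */
}

int main(void)
{
	struct error_slot slots[ERROR_LOG_SLOTS] = {0};
	unsigned char buf[64];	/* deliberately too small for all slots */

	return copy_error_log(slots, buf, sizeof(buf));	/* still 0 */
}
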
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -381,9 +381,7 @@ int __init nvmet_init_discovery(void)
 {
 	nvmet_disc_subsys =
 		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
-	if (IS_ERR(nvmet_disc_subsys))
-		return PTR_ERR(nvmet_disc_subsys);
-	return 0;
+	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
 }
 
 void nvmet_exit_discovery(void)
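
PTR_ERR_OR_ZERO() from include/linux/err.h folds the IS_ERR()/PTR_ERR() pair into one call, so the three replaced lines and the one-liner above are equivalent. A standalone model of the error-pointer convention (a pointer value falling in the last page of the address space encodes a negative errno), with the kernel's __must_check/__force annotations dropped:

#include <stdio.h>

/* Simplified stand-ins for the helpers in include/linux/err.h. */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)   { return IS_ERR_VALUE((unsigned long)p); }

/* The helper the patch switches to, as defined (modulo annotations). */
static inline int PTR_ERR_OR_ZERO(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	else
		return 0;
}

int main(void)
{
	void *ok = &ok;			/* any valid pointer */
	void *bad = ERR_PTR(-12);	/* -ENOMEM */

	printf("ok:  %d\n", PTR_ERR_OR_ZERO(ok));	/* 0 */
	printf("bad: %d\n", PTR_ERR_OR_ZERO(bad));	/* -12 */
	return 0;
}
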