Commit dd0b0a4a authored by Weiwen Hu, committed by Keith Busch

nvme: rename CRD/MORE/DNR to NVME_STATUS_*

The CRD/MORE/DNR fields do not belong to the SC (Status Code) field in the
NVMe spec, so rename them to NVME_STATUS_* to avoid confusion.
Signed-off-by: Weiwen Hu <huweiwen@linux.alibaba.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent d89a5c67
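
For orientation before the diff: in the driver's 16-bit status word, only the low byte is the Status Code (SC) proper; the Status Code Type (SCT) sits in bits 10:8, and CRD, MORE and DNR occupy bits 14:11 above both, which is why keeping an NVME_SC_ prefix on those upper bits was misleading. Below is a minimal userspace sketch of that decomposition using the mask values from this patch; decode_status() is an illustrative helper, not something the commit adds.

	#include <stdint.h>
	#include <stdio.h>

	#define NVME_SC_MASK		0x00ff	/* Status Code, bits 7:0 */
	#define NVME_SCT_MASK		0x0700	/* Status Code Type, bits 10:8 */
	#define NVME_STATUS_CRD		0x1800	/* Command Retry Delay, bits 12:11 */
	#define NVME_STATUS_MORE	0x2000	/* more error info available, bit 13 */
	#define NVME_STATUS_DNR		0x4000	/* Do Not Retry, bit 14 */

	/* Print each field of a driver-side NVMe status word. */
	static void decode_status(uint16_t status)
	{
		printf("SC=0x%02x SCT=%u CRD=%u%s%s\n",
		       (unsigned)(status & NVME_SC_MASK),
		       (unsigned)((status & NVME_SCT_MASK) >> 8),
		       (unsigned)((status & NVME_STATUS_CRD) >> 11),
		       status & NVME_STATUS_MORE ? " MORE" : "",
		       status & NVME_STATUS_DNR ? " DNR" : "");
	}

	int main(void)
	{
		decode_status(0x4002);	/* generic SCT, SC 0x02, DNR set */
		return 0;
	}
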
@@ -307,7 +307,7 @@ static void nvme_retry_req(struct request *req)
 	u16 crd;
 	/* The mask and shift result must be <= 3 */
-	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
+	crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11;
 	if (crd)
 		delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
@@ -331,8 +331,8 @@ static void nvme_log_error(struct request *req)
 			nvme_get_error_status_str(nr->status),
 			NVME_SCT(nr->status),		/* Status Code Type */
 			nr->status & NVME_SC_MASK,	/* Status Code */
-			nr->status & NVME_SC_MORE ? "MORE " : "",
-			nr->status & NVME_SC_DNR ? "DNR " : "");
+			nr->status & NVME_STATUS_MORE ? "MORE " : "",
+			nr->status & NVME_STATUS_DNR ? "DNR " : "");
 		return;
 	}
@@ -343,8 +343,8 @@ static void nvme_log_error(struct request *req)
 		nvme_get_error_status_str(nr->status),
 		NVME_SCT(nr->status),		/* Status Code Type */
 		nr->status & NVME_SC_MASK,	/* Status Code */
-		nr->status & NVME_SC_MORE ? "MORE " : "",
-		nr->status & NVME_SC_DNR ? "DNR " : "");
+		nr->status & NVME_STATUS_MORE ? "MORE " : "",
+		nr->status & NVME_STATUS_DNR ? "DNR " : "");
 }
 static void nvme_log_err_passthru(struct request *req)
@@ -361,8 +361,8 @@ static void nvme_log_err_passthru(struct request *req)
 		nvme_get_error_status_str(nr->status),
 		NVME_SCT(nr->status),		/* Status Code Type */
 		nr->status & NVME_SC_MASK,	/* Status Code */
-		nr->status & NVME_SC_MORE ? "MORE " : "",
-		nr->status & NVME_SC_DNR ? "DNR " : "",
+		nr->status & NVME_STATUS_MORE ? "MORE " : "",
+		nr->status & NVME_STATUS_DNR ? "DNR " : "",
 		nr->cmd->common.cdw10,
 		nr->cmd->common.cdw11,
 		nr->cmd->common.cdw12,
@@ -384,7 +384,7 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
 		return COMPLETE;
 	if (blk_noretry_request(req) ||
-	    (nvme_req(req)->status & NVME_SC_DNR) ||
+	    (nvme_req(req)->status & NVME_STATUS_DNR) ||
 	    nvme_req(req)->retries >= nvme_max_retries)
 		return COMPLETE;
@@ -3940,7 +3940,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
 {
-	int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
+	int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
 	if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
 		dev_err(ns->ctrl->device,
@@ -3956,7 +3956,7 @@ static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
 	 *
 	 * TODO: we should probably schedule a delayed retry here.
 	 */
-	if (ret > 0 && (ret & NVME_SC_DNR))
+	if (ret > 0 && (ret & NVME_STATUS_DNR))
 		nvme_ns_remove(ns);
 }
@@ -4147,7 +4147,7 @@ static void nvme_scan_work(struct work_struct *work)
 		 * they report) but don't actually support it.
 		 */
 		ret = nvme_scan_ns_list(ctrl);
-		if (ret > 0 && ret & NVME_SC_DNR)
+		if (ret > 0 && ret & NVME_STATUS_DNR)
 			nvme_scan_ns_sequential(ctrl);
 	}
 	mutex_unlock(&ctrl->scan_lock);
...
@@ -187,7 +187,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 	if (unlikely(ret != 0))
 		dev_err(ctrl->device,
 			"Property Get error: %d, offset %#x\n",
-			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
 	return ret;
 }
@@ -233,7 +233,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	if (unlikely(ret != 0))
 		dev_err(ctrl->device,
 			"Property Get error: %d, offset %#x\n",
-			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvmf_reg_read64);
@@ -275,7 +275,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 	if (unlikely(ret))
 		dev_err(ctrl->device,
 			"Property Set error: %d, offset %#x\n",
-			ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+			ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvmf_reg_write32);
@@ -295,7 +295,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 		int errval, int offset, struct nvme_command *cmd,
 		struct nvmf_connect_data *data)
 {
-	int err_sctype = errval & ~NVME_SC_DNR;
+	int err_sctype = errval & ~NVME_STATUS_DNR;
 	if (errval < 0) {
 		dev_err(ctrl->device,
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
  */
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status)
 {
-	if (status > 0 && (status & NVME_SC_DNR))
+	if (status > 0 && (status & NVME_STATUS_DNR))
 		return false;
 	if (status == -EKEYREJECTED)
...
@@ -75,7 +75,7 @@ void nvme_should_fail(struct request *req)
 		/* inject status code and DNR bit */
 		status = fault_inject->status;
 		if (fault_inject->dont_retry)
-			status |= NVME_SC_DNR;
+			status |= NVME_STATUS_DNR;
 		nvme_req(req)->status = status;
 	}
 }
...
@@ -3132,7 +3132,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	if (ctrl->ctrl.icdoff) {
 		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
 			ctrl->ctrl.icdoff);
-		ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out_stop_keep_alive;
 	}
@@ -3140,7 +3140,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
 		dev_err(ctrl->ctrl.device,
 			"Mandatory sgls are not supported!\n");
-		ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out_stop_keep_alive;
 	}
@@ -3325,7 +3325,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
 	} else {
 		if (portptr->port_state == FC_OBJSTATE_ONLINE) {
-			if (status > 0 && (status & NVME_SC_DNR))
+			if (status > 0 && (status & NVME_STATUS_DNR))
 				dev_warn(ctrl->ctrl.device,
 					 "NVME-FC{%d}: reconnect failure\n",
 					 ctrl->cnum);
...
@@ -878,7 +878,7 @@ enum {
 	NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1),
 	/* Set BLK_MQ_REQ_RESERVED when allocating request */
 	NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2),
-	/* Retry command when NVME_SC_DNR is not set in the result */
+	/* Retry command when NVME_STATUS_DNR is not set in the result */
 	NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3),
 };
...
@@ -344,7 +344,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
 	pr_debug("unhandled lid %d on qid %d\n",
 		 req->cmd->get_log_page.lid, req->sq->qid);
 	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
-	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
 }
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
@@ -496,7 +496,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
 		req->error_loc = offsetof(struct nvme_identify, nsid);
-		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
 		goto out;
 	}
@@ -662,7 +662,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
 	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
 			off) != NVME_IDENTIFY_DATA_SIZE - off)
-		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
 out:
 	nvmet_req_complete(req, status);
@@ -724,7 +724,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
 	pr_debug("unhandled identify cns %d on qid %d\n",
 		 req->cmd->identify.cns, req->sq->qid);
 	req->error_loc = offsetof(struct nvme_identify, cns);
-	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
 }
 /*
@@ -807,7 +807,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
 	if (val32 & ~mask) {
 		req->error_loc = offsetof(struct nvme_common_command, cdw11);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
@@ -833,7 +833,7 @@ void nvmet_execute_set_features(struct nvmet_req *req)
 		ncqr = (cdw11 >> 16) & 0xffff;
 		nsqr = cdw11 & 0xffff;
 		if (ncqr == 0xffff || nsqr == 0xffff) {
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 			break;
 		}
 		nvmet_set_result(req,
@@ -846,14 +846,14 @@ void nvmet_execute_set_features(struct nvmet_req *req)
 		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
 		break;
 	case NVME_FEAT_HOST_ID:
-		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+		status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
 		break;
 	case NVME_FEAT_WRITE_PROTECT:
 		status = nvmet_set_feat_write_protect(req);
 		break;
 	default:
 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		break;
 	}
@@ -939,7 +939,7 @@ void nvmet_execute_get_features(struct nvmet_req *req)
 		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
 			req->error_loc =
 				offsetof(struct nvme_common_command, cdw11);
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 			break;
 		}
@@ -952,7 +952,7 @@ void nvmet_execute_get_features(struct nvmet_req *req)
 	default:
 		req->error_loc =
 			offsetof(struct nvme_common_command, cdw10);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		break;
 	}
@@ -969,7 +969,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
 	mutex_lock(&ctrl->lock);
 	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
 		mutex_unlock(&ctrl->lock);
-		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
+		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
 		return;
 	}
 	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
@@ -1006,7 +1006,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
 	if (nvme_is_fabrics(cmd))
 		return nvmet_parse_fabrics_admin_cmd(req);
 	if (unlikely(!nvmet_check_auth_status(req)))
-		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
 	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
 		return nvmet_parse_discovery_cmd(req);
...
@@ -55,18 +55,18 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
 		return NVME_SC_SUCCESS;
 	case -ENOSPC:
 		req->error_loc = offsetof(struct nvme_rw_command, length);
-		return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+		return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
 	case -EREMOTEIO:
 		req->error_loc = offsetof(struct nvme_rw_command, slba);
-		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
 	case -EOPNOTSUPP:
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
 		switch (req->cmd->common.opcode) {
 		case nvme_cmd_dsm:
 		case nvme_cmd_write_zeroes:
-			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
 		default:
-			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+			return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 		}
 		break;
 	case -ENODATA:
@@ -76,7 +76,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
 		fallthrough;
 	default:
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INTERNAL | NVME_SC_DNR;
+		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
 	}
 }
@@ -86,7 +86,7 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
 		 req->sq->qid);
 	req->error_loc = offsetof(struct nvme_common_command, opcode);
-	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+	return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 }
 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
@@ -97,7 +97,7 @@ u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
 {
 	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
 		req->error_loc = offsetof(struct nvme_common_command, dptr);
-		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
 	}
 	return 0;
 }
@@ -106,7 +106,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
 {
 	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
 		req->error_loc = offsetof(struct nvme_common_command, dptr);
-		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
 	}
 	return 0;
 }
@@ -115,7 +115,7 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
 {
 	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
 		req->error_loc = offsetof(struct nvme_common_command, dptr);
-		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+		return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
 	}
 	return 0;
 }
@@ -145,7 +145,7 @@ static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
 	while (ctrl->nr_async_event_cmds) {
 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
 		mutex_unlock(&ctrl->lock);
-		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR);
 		mutex_lock(&ctrl->lock);
 	}
 	mutex_unlock(&ctrl->lock);
@@ -444,7 +444,7 @@ u16 nvmet_req_find_ns(struct nvmet_req *req)
 		req->error_loc = offsetof(struct nvme_common_command, nsid);
 		if (nvmet_subsys_nsid_exists(subsys, nsid))
 			return NVME_SC_INTERNAL_PATH_ERROR;
-		return NVME_SC_INVALID_NS | NVME_SC_DNR;
+		return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
 	}
 	percpu_ref_get(&req->ns->ref);
@@ -904,7 +904,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
 		return nvmet_parse_fabrics_io_cmd(req);
 	if (unlikely(!nvmet_check_auth_status(req)))
-		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
 	ret = nvmet_check_ctrl_status(req);
 	if (unlikely(ret))
@@ -966,7 +966,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	/* no support for fused commands yet */
 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
 		req->error_loc = offsetof(struct nvme_common_command, flags);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto fail;
 	}
@@ -977,7 +977,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	 */
 	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
 		req->error_loc = offsetof(struct nvme_common_command, flags);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto fail;
 	}
@@ -995,7 +995,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	trace_nvmet_req_init(req, req->cmd);
 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto fail;
 	}
@@ -1022,7 +1022,7 @@ bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
 {
 	if (unlikely(len != req->transfer_len)) {
 		req->error_loc = offsetof(struct nvme_common_command, dptr);
-		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
 		return false;
 	}
@@ -1034,7 +1034,7 @@ bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
 {
 	if (unlikely(data_len > req->transfer_len)) {
 		req->error_loc = offsetof(struct nvme_common_command, dptr);
-		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR);
 		return false;
 	}
@@ -1303,18 +1303,18 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req)
 	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
 		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
 		       req->cmd->common.opcode, req->sq->qid);
-		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
 	}
 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
 		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
 		       req->cmd->common.opcode, req->sq->qid);
-		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
 	}
 	if (unlikely(!nvmet_check_auth_status(req))) {
 		pr_warn("qid %d not authenticated\n", req->sq->qid);
-		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
+		return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR;
 	}
 	return 0;
 }
@@ -1388,7 +1388,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	int ret;
 	u16 status;
-	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
@@ -1404,7 +1404,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 			hostnqn, subsysnqn);
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 		up_read(&nvmet_config_sem);
-		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		goto out_put_subsystem;
 	}
@@ -1455,7 +1455,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 			      subsys->cntlid_min, subsys->cntlid_max,
 			      GFP_KERNEL);
 	if (ret < 0) {
-		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
 		goto out_free_sqs;
 	}
 	ctrl->cntlid = ret;
...
@@ -179,7 +179,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
 	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
 		req->error_loc =
 			offsetof(struct nvme_get_log_page_command, lid);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
@@ -187,7 +187,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
 	if (offset & 0x3) {
 		req->error_loc =
 			offsetof(struct nvme_get_log_page_command, lpo);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
@@ -256,7 +256,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
 	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
 		req->error_loc = offsetof(struct nvme_identify, cns);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
@@ -320,7 +320,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
 	default:
 		req->error_loc =
 			offsetof(struct nvme_common_command, cdw10);
-		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		break;
 	}
@@ -345,7 +345,7 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
 	default:
 		req->error_loc =
 			offsetof(struct nvme_common_command, cdw10);
-		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		break;
 	}
@@ -361,7 +361,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
 			cmd->common.opcode);
 		req->error_loc =
 			offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 	switch (cmd->common.opcode) {
@@ -386,7 +386,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
 	default:
 		pr_debug("unhandled cmd %d\n", cmd->common.opcode);
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 }
...
@@ -189,26 +189,26 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 	u8 dhchap_status;
 	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_send_command, secp);
 		goto done;
 	}
 	if (req->cmd->auth_send.spsp0 != 0x01) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_send_command, spsp0);
 		goto done;
 	}
 	if (req->cmd->auth_send.spsp1 != 0x01) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_send_command, spsp1);
 		goto done;
 	}
 	tl = le32_to_cpu(req->cmd->auth_send.tl);
 	if (!tl) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_send_command, tl);
 		goto done;
@@ -438,26 +438,26 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
 	u16 status = 0;
 	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_receive_command, secp);
 		goto done;
 	}
 	if (req->cmd->auth_receive.spsp0 != 0x01) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_receive_command, spsp0);
 		goto done;
 	}
 	if (req->cmd->auth_receive.spsp1 != 0x01) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_receive_command, spsp1);
 		goto done;
 	}
 	al = le32_to_cpu(req->cmd->auth_receive.al);
 	if (!al) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc =
 			offsetof(struct nvmf_auth_receive_command, al);
 		goto done;
...
@@ -18,7 +18,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
 	if (req->cmd->prop_set.attrib & 1) {
 		req->error_loc =
 			offsetof(struct nvmf_property_set_command, attrib);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
@@ -29,7 +29,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req)
 	default:
 		req->error_loc =
 			offsetof(struct nvmf_property_set_command, offset);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 out:
 	nvmet_req_complete(req, status);
@@ -50,7 +50,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
 			val = ctrl->cap;
 			break;
 		default:
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 			break;
 		}
 	} else {
@@ -65,7 +65,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
 			val = ctrl->csts;
 			break;
 		default:
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 			break;
 		}
 	}
@@ -105,7 +105,7 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req)
 		pr_debug("received unknown capsule type 0x%x\n",
 			 cmd->fabrics.fctype);
 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 	return 0;
@@ -128,7 +128,7 @@ u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req)
 		pr_debug("received unknown capsule type 0x%x\n",
 			 cmd->fabrics.fctype);
 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 	return 0;
@@ -147,14 +147,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 		pr_warn("queue size zero!\n");
 		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
-		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 		goto err;
 	}
 	if (ctrl->sqs[qid] != NULL) {
 		pr_warn("qid %u has already been created\n", qid);
 		req->error_loc = offsetof(struct nvmf_connect_command, qid);
-		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
 	}
 	/* for fabrics, this value applies to only the I/O Submission Queues */
@@ -163,14 +163,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
 			sqsize, mqes, ctrl->cntlid);
 		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
-		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		return NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 	}
 	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
 	if (old) {
 		pr_warn("queue already connected!\n");
 		req->error_loc = offsetof(struct nvmf_connect_command, opcode);
-		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+		return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR;
 	}
 	/* note: convert queue size from 0's-based value to 1's-based value */
@@ -233,14 +233,14 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		pr_warn("invalid connect version (%d).\n",
 			le16_to_cpu(c->recfmt));
 		req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
-		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
 		goto out;
 	}
 	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
 		pr_warn("connect attempt for invalid controller ID %#x\n",
 			d->cntlid);
-		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 		goto out;
 	}
@@ -260,7 +260,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 			dhchap_status);
 		nvmet_ctrl_put(ctrl);
 		if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED)
-			status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR);
+			status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR);
 		else
 			status = NVME_SC_INTERNAL;
 		goto out;
@@ -311,7 +311,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
 			le16_to_cpu(c->recfmt));
-		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR;
 		goto out;
 	}
@@ -320,13 +320,13 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
 				   le16_to_cpu(d->cntlid), req);
 	if (!ctrl) {
-		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 		goto out;
 	}
 	if (unlikely(qid > ctrl->subsys->max_qid)) {
 		pr_warn("invalid queue id (%d)\n", qid);
-		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR;
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
 		goto out_ctrl_put;
 	}
@@ -356,13 +356,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
 		pr_debug("invalid command 0x%x on unconnected queue.\n",
 			 cmd->fabrics.opcode);
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
 		pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
 			 cmd->fabrics.fctype);
 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 	if (cmd->connect.qid == 0)
...
@@ -137,11 +137,11 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 	 */
 	switch (blk_sts) {
 	case BLK_STS_NOSPC:
-		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+		status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_rw_command, length);
 		break;
 	case BLK_STS_TARGET:
-		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_rw_command, slba);
 		break;
 	case BLK_STS_NOTSUPP:
@@ -149,10 +149,10 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 		switch (req->cmd->common.opcode) {
 		case nvme_cmd_dsm:
 		case nvme_cmd_write_zeroes:
-			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_STATUS_DNR;
 			break;
 		default:
-			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+			status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 		}
 		break;
 	case BLK_STS_MEDIUM:
@@ -161,7 +161,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 		break;
 	case BLK_STS_IOERR:
 	default:
-		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_common_command, opcode);
 	}
@@ -358,7 +358,7 @@ u16 nvmet_bdev_flush(struct nvmet_req *req)
 		return 0;
 	if (blkdev_issue_flush(req->ns->bdev))
-		return NVME_SC_INTERNAL | NVME_SC_DNR;
+		return NVME_SC_INTERNAL | NVME_STATUS_DNR;
 	return 0;
 }
...
@@ -306,7 +306,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 		ns = nvme_find_get_ns(ctrl, nsid);
 		if (unlikely(!ns)) {
 			pr_err("failed to get passthru ns nsid:%u\n", nsid);
-			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+			status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
 			goto out;
 		}
@@ -426,7 +426,7 @@ u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
 		 * emulated in the future if regular targets grow support for
 		 * this feature.
 		 */
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 	return nvmet_setup_passthru_command(req);
@@ -478,7 +478,7 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
 	case NVME_FEAT_RESV_PERSIST:
 		/* No reservations, see nvmet_parse_passthru_io_cmd() */
 	default:
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	}
 }
@@ -546,7 +546,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
 			req->p.use_workqueue = true;
 			return NVME_SC_SUCCESS;
 		}
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	case NVME_ID_CNS_NS:
 		req->execute = nvmet_passthru_execute_cmd;
 		req->p.use_workqueue = true;
@@ -558,7 +558,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
 			req->p.use_workqueue = true;
 			return NVME_SC_SUCCESS;
 		}
-		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
 	default:
 		return nvmet_setup_passthru_command(req);
 	}
...
@@ -852,12 +852,12 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
 	if (!nvme_is_write(rsp->req.cmd)) {
 		rsp->req.error_loc =
 			offsetof(struct nvme_common_command, opcode);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 	if (off + len > rsp->queue->dev->inline_data_size) {
 		pr_err("invalid inline data offset!\n");
-		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+		return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
 	}
 	/* no data command? */
@@ -919,7 +919,7 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
 			pr_err("invalid SGL subtype: %#x\n", sgl->type);
 			rsp->req.error_loc =
 				offsetof(struct nvme_common_command, dptr);
-			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		}
 	case NVME_KEY_SGL_FMT_DATA_DESC:
 		switch (sgl->type & 0xf) {
@@ -931,12 +931,12 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
 			pr_err("invalid SGL subtype: %#x\n", sgl->type);
 			rsp->req.error_loc =
 				offsetof(struct nvme_common_command, dptr);
-			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		}
 	default:
 		pr_err("invalid SGL type: %#x\n", sgl->type);
 		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
-		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
+		return NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
 	}
 }
...
@@ -416,10 +416,10 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
 	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
 			  NVME_SGL_FMT_OFFSET)) {
 		if (!nvme_is_write(cmd->req.cmd))
-			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		if (len > cmd->req.port->inline_data_size)
-			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+			return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR;
 		cmd->pdu_len = len;
 	}
 	cmd->req.transfer_len += len;
...
@@ -100,7 +100,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
 	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
 		req->error_loc = offsetof(struct nvme_identify, nsid);
-		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
 		goto out;
 	}
@@ -121,7 +121,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req)
 	}
 	if (!bdev_is_zoned(req->ns->bdev)) {
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		req->error_loc = offsetof(struct nvme_identify, nsid);
 		goto out;
 	}
@@ -158,17 +158,17 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
 	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);
-		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		return NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
 	}
 	if (out_bufsize < sizeof(struct nvme_zone_report)) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 	switch (req->cmd->zmr.pr) {
@@ -177,7 +177,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
 		break;
 	default:
 		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 	switch (req->cmd->zmr.zrasf) {
@@ -193,7 +193,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req)
 	default:
 		req->error_loc =
 			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 	return NVME_SC_SUCCESS;
@@ -341,7 +341,7 @@ static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret)
 		return NVME_SC_SUCCESS;
 	case -EINVAL:
 	case -EIO:
-		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+		return NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR;
 	default:
 		return NVME_SC_INTERNAL;
 	}
@@ -463,7 +463,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
 	default:
 		/* this is needed to quiet compiler warning */
 		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 	}
 	return NVME_SC_SUCCESS;
@@ -481,7 +481,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
 	if (op == REQ_OP_LAST) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
-		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR;
+		status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR;
 		goto out;
 	}
@@ -493,13 +493,13 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
 	if (sect >= get_capacity(bdev->bd_disk)) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
-		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
 		goto out;
 	}
 	if (sect & (zone_sectors - 1)) {
 		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
@@ -551,13 +551,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
 		req->error_loc = offsetof(struct nvme_rw_command, slba);
-		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR;
 		goto out;
 	}
 	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
 		req->error_loc = offsetof(struct nvme_rw_command, slba);
-		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
 		goto out;
 	}
@@ -590,7 +590,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 	}
 	if (total_len != nvmet_rw_data_len(req)) {
-		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
 		goto out_put_bio;
 	}
...
@@ -1994,9 +1994,9 @@ enum {
 	NVME_SCT_MASK = 0x0700, /* Status Code Type */
 	NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK,
-	NVME_SC_CRD = 0x1800, /* Command Retry Delayed */
-	NVME_SC_MORE = 0x2000,
-	NVME_SC_DNR = 0x4000, /* Do Not Retry */
+	NVME_STATUS_CRD = 0x1800, /* Command Retry Delayed */
+	NVME_STATUS_MORE = 0x2000,
+	NVME_STATUS_DNR = 0x4000, /* Do Not Retry */
 };
 #define NVME_SCT(status) ((status) >> 8 & 7)
...
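
After the rename, composing a status is unchanged from what the diff shows everywhere above: a status code is OR'ed with the upper flag bits, and retry paths only test the DNR bit. A small self-contained check under the same assumptions (NVME_SC_INVALID_FIELD = 0x2 is the generic NVMe status code value; the masks come from the enum in the last hunk):

	#include <assert.h>

	enum {
		NVME_SC_INVALID_FIELD	= 0x2,
		NVME_SC_MASK		= 0x00ff,
		NVME_STATUS_DNR		= 0x4000,
	};

	int main(void)
	{
		unsigned short status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;

		assert((status & NVME_SC_MASK) == NVME_SC_INVALID_FIELD); /* SC byte intact */
		assert(status & NVME_STATUS_DNR);	/* host must not retry */
		return 0;
	}
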