Commit d49187e9 authored by Christoph Hellwig, committed by Jens Axboe

nvme: introduce struct nvme_request

This adds a shared per-request structure for all NVMe I/O.  This structure
is embedded as the first member of each NVMe transport driver's
request-private data and allows common functionality to be implemented
across the drivers.

The first use is to replace the current abuse of the SCSI command
passthrough fields in struct request for NVMe command passthrough,
but it will grow more fields to allow implementing things like
common abort handlers in the future.

Passthrough commands are handled by keeping a pointer to the SQE
(struct nvme_command) in struct nvme_request, together with a union of
the possible result fields, which had to be turned from an anonymous
into a named union for that purpose.  This avoids having to pass
a reference to a full CQE around and thus makes checking the result
a lot more lightweight.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 41c9499b
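
In short, every transport driver's blk-mq request PDU now starts with the
shared structure, and common code reaches it through the nvme_req() helper
added in nvme.h.  A minimal sketch of the pattern the diff below establishes
(example_transport_request and example_complete_one are illustrative names,
not part of the patch):

	/* Shared header added by this patch; see the nvme.h hunk below. */
	struct nvme_request {
		struct nvme_command	*cmd;	/* SQE used for passthrough */
		union nvme_result	result;	/* sized completion result */
	};

	/* Each transport (pci, rdma, loop, ...) embeds it first in its PDU. */
	struct example_transport_request {
		struct nvme_request	req;	/* must be the first member */
		/* ... transport-specific fields follow ... */
	};

	/* Completion paths copy only the result out of the CQE instead of
	 * memcpy()ing the whole CQE into req->special.
	 */
	static void example_complete_one(struct request *rq,
			struct nvme_completion *cqe)
	{
		nvme_req(rq)->result = cqe->result;
		blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
	}
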
@@ -221,8 +221,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 	req->cmd_type = REQ_TYPE_DRV_PRIV;
 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
-	req->cmd = (unsigned char *)cmd;
-	req->cmd_len = sizeof(struct nvme_command);
+	nvme_req(req)->cmd = cmd;
 	return req;
 }
@@ -321,7 +320,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 	int ret = 0;
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
-		memcpy(cmd, req->cmd, sizeof(*cmd));
+		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
 	else if (req_op(req) == REQ_OP_FLUSH)
 		nvme_setup_flush(ns, cmd);
 	else if (req_op(req) == REQ_OP_DISCARD)
@@ -338,7 +337,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
  * if the result is positive, it's an NVM Express status code
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+		union nvme_result *result, void *buffer, unsigned bufflen,
 		unsigned timeout, int qid, int at_head, int flags)
 {
 	struct request *req;
@@ -349,7 +348,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		return PTR_ERR(req);
 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-	req->special = cqe;
 	if (buffer && bufflen) {
 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -358,6 +356,8 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 	}
 	blk_execute_rq(req->q, NULL, req, at_head);
+	if (result)
+		*result = nvme_req(req)->result;
 	ret = req->errors;
  out:
 	blk_mq_free_request(req);
@@ -379,7 +379,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		u32 *result, unsigned timeout)
 {
 	bool write = nvme_is_write(cmd);
-	struct nvme_completion cqe;
 	struct nvme_ns *ns = q->queuedata;
 	struct gendisk *disk = ns ? ns->disk : NULL;
 	struct request *req;
@@ -392,7 +391,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		return PTR_ERR(req);
 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-	req->special = &cqe;
 	if (ubuffer && bufflen) {
 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -447,7 +445,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 	blk_execute_rq(req->q, disk, req, 0);
 	ret = req->errors;
 	if (result)
-		*result = le32_to_cpu(cqe.result);
+		*result = le32_to_cpu(nvme_req(req)->result.u32);
 	if (meta && !ret && !write) {
 		if (copy_to_user(meta_buffer, meta, meta_len))
 			ret = -EFAULT;
@@ -596,7 +594,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 		void *buffer, size_t buflen, u32 *result)
 {
 	struct nvme_command c;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 	memset(&c, 0, sizeof(c));
@@ -604,10 +602,10 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 	c.features.nsid = cpu_to_le32(nsid);
 	c.features.fid = cpu_to_le32(fid);
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
 			NVME_QID_ANY, 0, 0);
 	if (ret >= 0 && result)
-		*result = le32_to_cpu(cqe.result);
+		*result = le32_to_cpu(res.u32);
 	return ret;
 }
@@ -615,7 +613,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 		void *buffer, size_t buflen, u32 *result)
 {
 	struct nvme_command c;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 	memset(&c, 0, sizeof(c));
@@ -623,10 +621,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
 			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
 	if (ret >= 0 && result)
-		*result = le32_to_cpu(cqe.result);
+		*result = le32_to_cpu(res.u32);
 	return ret;
 }
@@ -1901,7 +1899,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl,
 		struct nvme_completion *cqe)
 {
 	u16 status = le16_to_cpu(cqe->status) >> 1;
-	u32 result = le32_to_cpu(cqe->result);
+	u32 result = le32_to_cpu(cqe->result.u32);
 	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
 		++ctrl->event_limit;
......
@@ -161,7 +161,7 @@ EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn);
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 {
 	struct nvme_command cmd;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 	memset(&cmd, 0, sizeof(cmd));
@@ -169,11 +169,11 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
 	cmd.prop_get.offset = cpu_to_le32(off);
-	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
 			NVME_QID_ANY, 0, 0);
 	if (ret >= 0)
-		*val = le64_to_cpu(cqe.result64);
+		*val = le64_to_cpu(res.u64);
 	if (unlikely(ret != 0))
 		dev_err(ctrl->device,
 			"Property Get error: %d, offset %#x\n",
@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 {
 	struct nvme_command cmd;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 	memset(&cmd, 0, sizeof(cmd));
@@ -216,11 +216,11 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	cmd.prop_get.attrib = 1;
 	cmd.prop_get.offset = cpu_to_le32(off);
-	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
 			NVME_QID_ANY, 0, 0);
 	if (ret >= 0)
-		*val = le64_to_cpu(cqe.result64);
+		*val = le64_to_cpu(res.u64);
 	if (unlikely(ret != 0))
 		dev_err(ctrl->device,
 			"Property Get error: %d, offset %#x\n",
@@ -368,7 +368,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 {
 	struct nvme_command cmd;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	struct nvmf_connect_data *data;
 	int ret;
@@ -400,16 +400,16 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe,
+	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
 			data, sizeof(*data), 0, NVME_QID_ANY, 1,
 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 	if (ret) {
-		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
 		goto out_free_data;
 	}
-	ctrl->cntlid = le16_to_cpu(cqe.result16);
+	ctrl->cntlid = le16_to_cpu(res.u16);
 out_free_data:
 	kfree(data);
@@ -441,7 +441,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 {
 	struct nvme_command cmd;
 	struct nvmf_connect_data *data;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 	memset(&cmd, 0, sizeof(cmd));
@@ -459,11 +459,11 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
-	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &cqe,
+	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
 			data, sizeof(*data), 0, qid, 1,
 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 	if (ret) {
-		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
 	}
 	kfree(data);
......
@@ -146,14 +146,6 @@ struct nvme_nvm_command {
 	};
 };
-struct nvme_nvm_completion {
-	__le64	result;		/* Used by LightNVM to return ppa completions */
-	__le16	sq_head;	/* how much of this queue may be reclaimed */
-	__le16	sq_id;		/* submission queue that generated this entry */
-	__u16	command_id;	/* of the command which completed */
-	__le16	status;		/* did the command fail, and if so, why? */
-};
 #define NVME_NVM_LP_MLC_PAIRS 886
 struct nvme_nvm_lp_mlc {
 	__le16			num_pairs;
@@ -481,11 +473,8 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
 static void nvme_nvm_end_io(struct request *rq, int error)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
-	struct nvme_nvm_completion *cqe = rq->special;
-	if (cqe)
-		rqd->ppa_status = le64_to_cpu(cqe->result);
+	rqd->ppa_status = nvme_req(rq)->result.u64;
 	nvm_end_io(rqd, error);
 	kfree(rq->cmd);
@@ -500,20 +489,18 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	struct bio *bio = rqd->bio;
 	struct nvme_nvm_command *cmd;
-	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
-	if (IS_ERR(rq))
-		return -ENOMEM;
-	cmd = kzalloc(sizeof(struct nvme_nvm_command) +
-				sizeof(struct nvme_nvm_completion), GFP_KERNEL);
-	if (!cmd) {
-		blk_mq_free_request(rq);
+	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
+	if (IS_ERR(rq)) {
+		kfree(cmd);
 		return -ENOMEM;
 	}
+	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->ioprio = bio_prio(bio);
 	if (bio_has_data(bio))
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -522,10 +509,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
-	rq->cmd = (unsigned char *)cmd;
-	rq->cmd_len = sizeof(struct nvme_nvm_command);
-	rq->special = cmd + 1;
 	rq->end_io_data = rqd;
 	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
......
@@ -79,6 +79,20 @@ enum nvme_quirks {
 	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),
 };
+/*
+ * Common request structure for NVMe passthrough.  All drivers must have
+ * this structure as the first member of their request-private data.
+ */
+struct nvme_request {
+	struct nvme_command	*cmd;
+	union nvme_result	result;
+};
+
+static inline struct nvme_request *nvme_req(struct request *req)
+{
+	return blk_mq_rq_to_pdu(req);
+}
 /* The below value is the specific amount of delay needed before checking
  * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
@@ -278,7 +292,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+		union nvme_result *result, void *buffer, unsigned bufflen,
 		unsigned timeout, int qid, int at_head, int flags);
 int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void __user *ubuffer, unsigned bufflen, u32 *result,
......
@@ -140,6 +140,7 @@ struct nvme_queue {
  * allocated to store the PRP list.
  */
 struct nvme_iod {
+	struct nvme_request req;
 	struct nvme_queue *nvmeq;
 	int aborted;
 	int npages;		/* In the PRP list. 0 means small pool in use */
@@ -707,8 +708,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		}
 		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
-			memcpy(req->special, &cqe, sizeof(cqe));
+		nvme_req(req)->result = cqe.result;
 		blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
 	}
......
@@ -66,6 +66,7 @@ struct nvme_rdma_qe {
 struct nvme_rdma_queue;
 struct nvme_rdma_request {
+	struct nvme_request	req;
 	struct ib_mr		*mr;
 	struct nvme_rdma_qe	sqe;
 	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
@@ -1117,13 +1118,10 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 		struct nvme_completion *cqe, struct ib_wc *wc, int tag)
 {
-	u16 status = le16_to_cpu(cqe->status);
 	struct request *rq;
 	struct nvme_rdma_request *req;
 	int ret = 0;
-	status >>= 1;
 	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
 	if (!rq) {
 		dev_err(queue->ctrl->ctrl.device,
@@ -1134,9 +1132,6 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 	}
 	req = blk_mq_rq_to_pdu(rq);
-	if (rq->cmd_type == REQ_TYPE_DRV_PRIV && rq->special)
-		memcpy(rq->special, cqe, sizeof(*cqe));
 	if (rq->tag == tag)
 		ret = 1;
@@ -1144,8 +1139,8 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 	    wc->ex.invalidate_rkey == req->mr->rkey)
 		req->mr->need_inval = false;
-	blk_mq_complete_request(rq, status);
+	req->req.result = cqe->result;
+	blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
 	return ret;
 }
......
@@ -617,7 +617,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 	}
@@ -638,7 +638,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 	pr_warn("could not find controller %d for subsys %s / host %s\n",
 		cntlid, subsysnqn, hostnqn);
-	req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 out:
@@ -700,7 +700,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		goto out;
 	}
@@ -709,7 +709,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
 		pr_info("connect by host %s for subsystem %s not allowed\n",
 			hostnqn, subsysnqn);
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 		up_read(&nvmet_config_sem);
 		goto out_put_subsystem;
 	}
......
@@ -69,7 +69,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
 		}
 	}
-	req->rsp->result64 = cpu_to_le64(val);
+	req->rsp->result.u64 = cpu_to_le64(val);
 	nvmet_req_complete(req, status);
 }
@@ -125,7 +125,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	d = kmap(sg_page(req->sg)) + req->sg->offset;
 	/* zero out initial completion result, assign values as needed */
-	req->rsp->result = 0;
+	req->rsp->result.u32 = 0;
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
@@ -138,7 +138,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		pr_warn("connect attempt for invalid controller ID %#x\n",
 			d->cntlid);
 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 		goto out;
 	}
@@ -155,7 +155,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	pr_info("creating controller %d for NQN %s.\n",
 		ctrl->cntlid, ctrl->hostnqn);
-	req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
 out:
 	kunmap(sg_page(req->sg));
@@ -173,7 +173,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	d = kmap(sg_page(req->sg)) + req->sg->offset;
 	/* zero out initial completion result, assign values as needed */
-	req->rsp->result = 0;
+	req->rsp->result.u32 = 0;
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
@@ -191,14 +191,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	if (unlikely(qid > ctrl->subsys->max_qid)) {
 		pr_warn("invalid queue id (%d)\n", qid);
 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-		req->rsp->result = IPO_IATTR_CONNECT_SQE(qid);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
 		goto out_ctrl_put;
 	}
 	status = nvmet_install_queue(ctrl, req);
 	if (status) {
 		/* pass back cntlid that had the issue of installing queue */
-		req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+		req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
 		goto out_ctrl_put;
 	}
......
@@ -36,6 +36,7 @@
 	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
 struct nvme_loop_iod {
+	struct nvme_request	nvme_req;
 	struct nvme_command	cmd;
 	struct nvme_completion	rsp;
 	struct nvmet_req	req;
@@ -112,10 +113,10 @@ static void nvme_loop_complete_rq(struct request *req)
 	blk_mq_end_request(req, error);
 }
-static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
+static void nvme_loop_queue_response(struct nvmet_req *req)
 {
 	struct nvme_loop_iod *iod =
-		container_of(nvme_req, struct nvme_loop_iod, req);
+		container_of(req, struct nvme_loop_iod, req);
 	struct nvme_completion *cqe = &iod->rsp;
 	/*
@@ -128,11 +129,10 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 	    cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
 		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
 	} else {
-		struct request *req = blk_mq_rq_from_pdu(iod);
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
-			memcpy(req->special, cqe, sizeof(*cqe));
-		blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
+		struct request *rq = blk_mq_rq_from_pdu(iod);
+		iod->nvme_req.result = cqe->result;
+		blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
 	}
 }
......
@@ -238,7 +238,7 @@ static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
-	req->rsp->result = cpu_to_le32(result);
+	req->rsp->result.u32 = cpu_to_le32(result);
 }
 /*
......
@@ -949,11 +949,11 @@ struct nvme_completion {
 	/*
 	 * Used by Admin and Fabrics commands to return data:
 	 */
-	union {
-		__le16	result16;
-		__le32	result;
-		__le64	result64;
-	};
+	union nvme_result {
+		__le16	u16;
+		__le32	u32;
+		__le64	u64;
+	} result;
 	__le16	sq_head;	/* how much of this queue may be reclaimed */
 	__le16	sq_id;		/* submission queue that generated this entry */
 	__u16	command_id;	/* of the command which completed */
......
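
For reference, a short sketch of how a synchronous caller reads the sized
result after this rename, mirroring the nvme_get_features() and
nvmf_reg_read64() hunks above (variable names are abbreviated from those
hunks; error handling is elided):

	union nvme_result res;
	int ret;

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0)
		*val = le64_to_cpu(res.u64);	/* or res.u32 / res.u16 */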