Commit 6887fc64 authored by Sagi Grimberg's avatar Sagi Grimberg Committed by Christoph Hellwig

nvme: introduce nvme_start_request

In preparation for nvme-multipath IO stats accounting, we want the
accounting to happen in a centralized place. The request completion
is already centralized, but we need a common helper to request I/O
start.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
parent 99722c8a
...@@ -763,7 +763,7 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx, ...@@ -763,7 +763,7 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
goto out_free_cmd; goto out_free_cmd;
} }
blk_mq_start_request(req); nvme_start_request(req);
apple_nvme_submit_cmd(q, cmnd); apple_nvme_submit_cmd(q, cmnd);
return BLK_STS_OK; return BLK_STS_OK;
......
...@@ -2733,7 +2733,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, ...@@ -2733,7 +2733,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
atomic_set(&op->state, FCPOP_STATE_ACTIVE); atomic_set(&op->state, FCPOP_STATE_ACTIVE);
if (!(op->flags & FCOP_FLAGS_AEN)) if (!(op->flags & FCOP_FLAGS_AEN))
blk_mq_start_request(op->rq); nvme_start_request(op->rq);
cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
......
...@@ -1012,6 +1012,11 @@ static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl) ...@@ -1012,6 +1012,11 @@ static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
} }
#endif #endif
/*
 * Central request-start hook for all NVMe transports (PCIe, RDMA, TCP, FC,
 * Apple, loop).  Currently just forwards to blk_mq_start_request(); it exists
 * so that per-I/O work common to every transport — per the commit message,
 * upcoming nvme-multipath I/O stats accounting — can be added in one place,
 * mirroring the already-centralized request completion path.
 */
static inline void nvme_start_request(struct request *rq)
{
	blk_mq_start_request(rq);
}
static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl) static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{ {
return ctrl->sgls & ((1 << 0) | (1 << 1)); return ctrl->sgls & ((1 << 0) | (1 << 1));
......
...@@ -907,7 +907,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) ...@@ -907,7 +907,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
goto out_unmap_data; goto out_unmap_data;
} }
blk_mq_start_request(req); nvme_start_request(req);
return BLK_STS_OK; return BLK_STS_OK;
out_unmap_data: out_unmap_data:
nvme_unmap_data(dev, req); nvme_unmap_data(dev, req);
......
...@@ -2040,7 +2040,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, ...@@ -2040,7 +2040,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret) if (ret)
goto unmap_qe; goto unmap_qe;
blk_mq_start_request(rq); nvme_start_request(rq);
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
queue->pi_support && queue->pi_support &&
......
...@@ -2405,7 +2405,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, ...@@ -2405,7 +2405,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
blk_mq_start_request(rq); nvme_start_request(rq);
nvme_tcp_queue_request(req, true, bd->last); nvme_tcp_queue_request(req, true, bd->last);
......
...@@ -145,7 +145,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, ...@@ -145,7 +145,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret) if (ret)
return ret; return ret;
blk_mq_start_request(req); nvme_start_request(req);
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
iod->req.port = queue->ctrl->port; iod->req.port = queue->ctrl->port;
if (!nvmet_req_init(&iod->req, &queue->nvme_cq, if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.