Commit 721b3917 authored by James Smart, committed by Sagi Grimberg

nvme-fabrics: set sqe.command_id in core not transports

Currently, core.c sets command_id only on read/write commands, leaving it to
the transports to set it again so that every request has a command id.

Move the assignment in core so it applies to all commands, and remove the
per-transport assignments.
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Jay Freyensee <james_p_freyensee@linux.intel.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
parent a317178e
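
As background for the change above, the sketch below is a stand-alone user-space illustration, not kernel code: the struct layouts are simplified stand-ins for nvme_common_command, nvme_rw_command and the struct nvme_command union, not the real definitions. It shows why a single assignment through the common view is enough: every sub-command places command_id at the same offset, so cmd->common.command_id and cmd->rw.command_id alias the same field.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for nvme_common_command (not the kernel definition). */
struct sketch_common {
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t command_id;
	uint32_t nsid;
	uint8_t  rest[56];
};

/* Simplified stand-in for nvme_rw_command (not the kernel definition). */
struct sketch_rw {
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t command_id;
	uint32_t nsid;
	uint64_t rsvd2;
	uint64_t metadata;
	uint64_t prp1;
	uint64_t prp2;
	uint64_t slba;
	uint16_t length;
	uint8_t  rest[14];
};

/* Simplified stand-in for the struct nvme_command union. */
union sketch_cmd {
	struct sketch_common common;
	struct sketch_rw     rw;
};

int main(void)
{
	union sketch_cmd cmd = { 0 };

	/* command_id sits at the same offset in every sub-command ... */
	assert(offsetof(struct sketch_common, command_id) ==
	       offsetof(struct sketch_rw, command_id));

	/*
	 * ... so a single assignment through the common view tags the
	 * request no matter which setup helper filled in the command.
	 * The literal 42 stands in for req->tag.
	 */
	cmd.common.command_id = 42;
	printf("rw view sees command_id = %u\n", (unsigned)cmd.rw.command_id);
	return 0;
}
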
@@ -303,7 +303,6 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 	memset(cmnd, 0, sizeof(*cmnd));
 	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
-	cmnd->rw.command_id = req->tag;
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -345,6 +344,8 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 	else
 		nvme_setup_rw(ns, req, cmd);
 
+	cmd->common.command_id = req->tag;
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -611,7 +611,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		goto out;
 
-	cmnd.common.command_id = req->tag;
 	blk_mq_start_request(req);
 
 	spin_lock_irq(&nvmeq->q_lock);
@@ -1399,7 +1399,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
-	c->common.command_id = rq->tag;
 	blk_mq_start_request(rq);
 
 	map_len = nvme_map_len(rq);
@@ -194,7 +194,6 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
 	}
 
-	iod->cmd.common.command_id = req->tag;
 	blk_mq_start_request(req);
 
 	schedule_work(&iod->work);