Commit be3f3114 authored by Christoph Hellwig, committed by Jens Axboe

nvmet: Open code nvmet_req_execute()

Now that nvmet_req_execute() does nothing beyond calling the request's
->execute handler, open code that call at each call site.
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[split patch, update changelog]
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e9061c39
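
For context, nvmet_req_execute() had become a trivial wrapper around the
request's ->execute handler, which command parsing stores in struct
nvmet_req. Below is a minimal userspace sketch of the before/after of
open-coding such a wrapper; the simplified struct and the
read_cmd_execute handler are illustrative stand-ins, not the real kernel
definitions:

#include <stdio.h>

/* Simplified stand-in for the kernel's struct nvmet_req: command
 * parsing selects a handler and stores it in ->execute. */
struct nvmet_req {
	void (*execute)(struct nvmet_req *req);
};

static void read_cmd_execute(struct nvmet_req *req)
{
	printf("executing read command\n");
}

/* The wrapper removed by this commit added nothing over the indirect
 * call itself: */
static void nvmet_req_execute(struct nvmet_req *req)
{
	req->execute(req);
}

int main(void)
{
	struct nvmet_req req = { .execute = read_cmd_execute };

	nvmet_req_execute(&req);	/* before: call through the wrapper */
	req.execute(&req);		/* after: open-coded direct call */
	return 0;
}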
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -942,12 +942,6 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 }
 EXPORT_SYMBOL_GPL(nvmet_check_data_len);
 
-void nvmet_req_execute(struct nvmet_req *req)
-{
-	req->execute(req);
-}
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
-
 int nvmet_req_alloc_sgl(struct nvmet_req *req)
 {
 	struct pci_dev *p2p_dev = NULL;
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2018,7 +2018,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 		}
 
 		/* data transfer complete, resume with nvmet layer */
-		nvmet_req_execute(&fod->req);
+		fod->req.execute(&fod->req);
 		break;
 
 	case NVMET_FCOP_READDATA:
@@ -2234,7 +2234,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	 * can invoke the nvmet_layer now. If read data, cmd completion will
 	 * push the data
 	 */
-	nvmet_req_execute(&fod->req);
+	fod->req.execute(&fod->req);
 	return;
 
 transport_error:
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -125,7 +125,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
 	struct nvme_loop_iod *iod =
 		container_of(work, struct nvme_loop_iod, work);
 
-	nvmet_req_execute(&iod->req);
+	iod->req.execute(&iod->req);
 }
 
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -374,7 +374,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
 bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
-void nvmet_req_execute(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
 void nvmet_req_free_sgl(struct nvmet_req *req);
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -603,7 +603,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
-	nvmet_req_execute(&rsp->req);
+	rsp->req.execute(&rsp->req);
 }
 
 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -746,7 +746,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
 				queue->cm_id->port_num, &rsp->read_cqe, NULL))
 			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
 	} else {
-		nvmet_req_execute(&rsp->req);
+		rsp->req.execute(&rsp->req);
 	}
 
 	return true;
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -930,7 +930,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
 		goto out;
 	}
 
-	nvmet_req_execute(&queue->cmd->req);
+	queue->cmd->req.execute(&queue->cmd->req);
 out:
 	nvmet_prepare_receive_pdu(queue);
 	return ret;
@@ -1050,7 +1050,7 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
 			nvmet_tcp_prep_recv_ddgst(cmd);
 			return 0;
 		}
-		nvmet_req_execute(&cmd->req);
+		cmd->req.execute(&cmd->req);
 	}
 
 	nvmet_prepare_receive_pdu(queue);
@@ -1090,7 +1090,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
 
 	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
 	    cmd->rbytes_done == cmd->req.transfer_len)
-		nvmet_req_execute(&cmd->req);
+		cmd->req.execute(&cmd->req);
 	ret = 0;
 out:
 	nvmet_prepare_receive_pdu(queue);