Commit 19b58d94 authored by James Smart, committed by Christoph Hellwig

nvmet_fc: add req_release to lldd api

With the advent of the opdone calls changing context, the lldd can no
longer assume that once the op->done call returns for RSP operations
that the request struct is no longer being accessed.

As such, revise the lldd api for a req_release callback that the
transport will call when the job is complete. This will also be used
with abort cases.

Fix up the text in the api header for the change in io completion semantics.

Revised lpfc to support the new req_release api.
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
parent 39498fae
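
For LLDD authors, the practical effect of the change is that the nvmefc_tgt_fcp_req (and the exchange it represents) now stays owned by the transport after the final done() call, and is only handed back through the new fcp_req_release() entrypoint. Below is a minimal sketch of what a driver-side implementation looks like; the my_* names are illustrative only and are not part of this patch.

/* Illustrative sketch only: my_tgt_exchange, my_recycle_exchange and
 * my_fcp_op are hypothetical driver-side names, not from this patch.
 */
struct my_tgt_exchange {
        struct nvmefc_tgt_fcp_req fcp_req;   /* handed to nvmet_fc_rcv_fcp_req() */
        /* ... hardware exchange / DMA resources ... */
};

static void
my_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                   struct nvmefc_tgt_fcp_req *fcpreq)
{
        struct my_tgt_exchange *exch =
                container_of(fcpreq, struct my_tgt_exchange, fcp_req);

        /* The transport no longer references fcpreq; the exchange may
         * now be recycled for a new FCP command.
         */
        my_recycle_exchange(tgtport, exch);
}

static struct nvmet_fc_target_template my_tgttemplate = {
        /* ... other mandatory entrypoints ... */
        .fcp_op          = my_fcp_op,
        .fcp_req_release = my_fcp_req_release,  /* now checked at registration */
};
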
@@ -482,6 +482,8 @@ static void
 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
                        struct nvmet_fc_fcp_iod *fod)
 {
+       struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+       struct nvmet_fc_tgtport *tgtport = fod->tgtport;
        unsigned long flags;

        spin_lock_irqsave(&queue->qlock, flags);
@@ -493,6 +495,8 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
         * release the reference taken at queue lookup and fod allocation
         */
        nvmet_fc_tgt_q_put(queue);
+
+       tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 }

 static int
@@ -849,7 +853,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
        int ret, idx;

        if (!template->xmt_ls_rsp || !template->fcp_op ||
-           !template->targetport_delete ||
+           !template->fcp_req_release || !template->targetport_delete ||
            !template->max_hw_queues || !template->max_sgl_segments ||
            !template->max_dif_sgl_segments || !template->dma_boundary) {
                ret = -EINVAL;
@@ -2124,7 +2128,7 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
  * If this routine returns error, the lldd should abort the exchange.
  *
  * @target_port: pointer to the (registered) target port the FCP CMD IU
- *               was receive on.
+ *               was received on.
  * @fcpreq:      pointer to a fcpreq request structure to be used to reference
  *               the exchange corresponding to the FCP Exchange.
  * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU
...
@@ -492,14 +492,18 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
        tgt_fcpreq->fcp_error = fcp_err;
        tgt_fcpreq->done(tgt_fcpreq);

-       if ((!fcp_err) && (op == NVMET_FCOP_RSP ||
-                       op == NVMET_FCOP_READDATA_RSP ||
-                       op == NVMET_FCOP_ABORT))
-               schedule_work(&tfcp_req->work);
-
        return 0;
 }

+static void
+fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
+                       struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+       struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+
+       schedule_work(&tfcp_req->work);
+}
+
 static void
 fcloop_ls_abort(struct nvme_fc_local_port *localport,
                struct nvme_fc_remote_port *remoteport,
@@ -570,6 +574,7 @@ struct nvmet_fc_target_template tgttemplate = {
        .targetport_delete      = fcloop_targetport_delete,
        .xmt_ls_rsp             = fcloop_xmt_ls_rsp,
        .fcp_op                 = fcloop_fcp_op,
+       .fcp_req_release        = fcloop_fcp_req_release,
        .max_hw_queues          = FCLOOP_HW_QUEUES,
        .max_sgl_segments       = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments   = FCLOOP_SGL_SEGS,
...
@@ -408,9 +408,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                if (phba->ktime_on)
                        lpfc_nvmet_ktime(phba, ctxp);
 #endif
-               /* Let Abort cmpl repost the context */
-               if (!(ctxp->flag & LPFC_NVMET_ABORT_OP))
-                       lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+               /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
        } else {
                ctxp->entry_cnt++;
                start_clean = offsetof(struct lpfc_iocbq, wqe);
@@ -634,10 +632,47 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
        complete(&tport->tport_unreg_done);
 }

+static void
+lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
+                          struct nvmefc_tgt_fcp_req *rsp)
+{
+       struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
+       struct lpfc_nvmet_rcv_ctx *ctxp =
+               container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+       struct lpfc_hba *phba = ctxp->phba;
+       unsigned long flags;
+       bool aborting = false;
+
+       spin_lock_irqsave(&ctxp->ctxlock, flags);
+       if (ctxp->flag & LPFC_NVMET_ABORT_OP) {
+               aborting = true;
+               ctxp->flag |= LPFC_NVMET_CTX_RLS;
+       }
+       spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+
+       if (aborting)
+               /* let the abort path do the real release */
+               return;
+
+       /* Sanity check */
+       if (ctxp->state != LPFC_NVMET_STE_DONE) {
+               atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
+               lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+                               "6117 Bad state IO x%x aborted\n",
+                               ctxp->oxid);
+       }
+
+       lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
+                        ctxp->state, 0);
+
+       lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+}
+
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
+       .fcp_req_release = lpfc_nvmet_xmt_fcp_release,

        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -834,6 +869,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        ctxp->wqeq = NULL;
        ctxp->state = LPFC_NVMET_STE_RCV;
        ctxp->rqb_buffer = (void *)nvmebuf;
+       spin_lock_init(&ctxp->ctxlock);

        lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
                         oxid, size, sid);
@@ -1595,6 +1631,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        uint32_t status, result;
+       unsigned long flags;
+       bool released = false;

        ctxp = cmdwqe->context2;
        status = bf_get(lpfc_wcqe_c_status, wcqe);
@@ -1609,7 +1647,18 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                        result, wcqe->word3);

        ctxp->state = LPFC_NVMET_STE_DONE;
-       lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+       spin_lock_irqsave(&ctxp->ctxlock, flags);
+       if (ctxp->flag & LPFC_NVMET_CTX_RLS)
+               released = true;
+       ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+       spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+
+       /*
+        * if transport has released ctx, then can reuse it. Otherwise,
+        * will be recycled by transport release call.
+        */
+       if (released)
+               lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);

        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
@@ -1632,7 +1681,9 @@ lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 {
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
+       unsigned long flags;
        uint32_t status, result;
+       bool released = false;

        ctxp = cmdwqe->context2;
        status = bf_get(lpfc_wcqe_c_status, wcqe);
@@ -1654,7 +1705,19 @@ lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                        ctxp->state, ctxp->oxid);
        }
        ctxp->state = LPFC_NVMET_STE_DONE;
-       lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+
+       spin_lock_irqsave(&ctxp->ctxlock, flags);
+       if (ctxp->flag & LPFC_NVMET_CTX_RLS)
+               released = true;
+       ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
+       spin_unlock_irqrestore(&ctxp->ctxlock, flags);
+
+       /*
+        * if transport has released ctx, then can reuse it. Otherwise,
+        * will be recycled by transport release call.
+        */
+       if (released)
+               lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
 }
...
@@ -81,6 +81,7 @@ struct lpfc_nvmet_rcv_ctx {
        struct lpfc_iocbq *wqeq;
        struct lpfc_iocbq *abort_wqeq;
        dma_addr_t txrdy_phys;
+       spinlock_t ctxlock; /* protect flag access */
        uint32_t *txrdy;
        uint32_t sid;
        uint32_t offset;
@@ -97,8 +98,10 @@ struct lpfc_nvmet_rcv_ctx {
 #define LPFC_NVMET_STE_RSP             4
 #define LPFC_NVMET_STE_DONE            5
        uint16_t flag;
-#define LPFC_NVMET_IO_INP              1
-#define LPFC_NVMET_ABORT_OP            2
+#define LPFC_NVMET_IO_INP              0x1
+#define LPFC_NVMET_ABORT_OP            0x2
+#define LPFC_NVMET_CTX_RLS             0x4
+
        struct rqb_dmabuf *rqb_buffer;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
...
@@ -741,12 +741,12 @@ struct nvmet_fc_target_port {
  *       be freed/released.
  *       Entrypoint is Mandatory.
  *
- * @fcp_op:  Called to perform a data transfer, transmit a response, or
- *       abort an FCP opertion. The nvmefc_tgt_fcp_req structure is the same
- *       LLDD-supplied exchange structure specified in the
- *       nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received.
- *       The op field in the structure shall indicate the operation for
- *       the LLDD to perform relative to the io.
+ * @fcp_op:  Called to perform a data transfer or transmit a response.
+ *       The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
+ *       exchange structure specified in the nvmet_fc_rcv_fcp_req() call
+ *       made when the FCP CMD IU was received. The op field in the
+ *       structure shall indicate the operation for the LLDD to perform
+ *       relative to the io.
  *       NVMET_FCOP_READDATA operation: the LLDD is to send the
  *       payload data (described by sglist) to the host in 1 or
  *       more FC sequences (preferrably 1). Note: the fc-nvme layer
@@ -768,29 +768,35 @@ struct nvmet_fc_target_port {
  *       successfully, the LLDD is to update the nvmefc_tgt_fcp_req
  *       transferred_length field and may subsequently transmit the
  *       FCP_RSP iu payload (described by rspbuf, rspdma, rsplen).
- *       The LLDD is to await FCP_CONF reception to confirm the RSP
- *       reception by the host. The LLDD may retramsit the FCP_RSP iu
- *       if necessary per FC-NVME. Upon reception of FCP_CONF, or upon
- *       FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req
- *       fcp_error field and consider the operation complete..
+ *       If FCP_CONF is supported, the LLDD is to await FCP_CONF
+ *       reception to confirm the RSP reception by the host. The LLDD
+ *       may retramsit the FCP_RSP iu if necessary per FC-NVME. Upon
+ *       transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ *       or upon success/failure of FCP_CONF if it is supported, the
+ *       LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
+ *       consider the operation complete.
  *       NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload
- *       (described by rspbuf, rspdma, rsplen). The LLDD is to await
- *       FCP_CONF reception to confirm the RSP reception by the host.
- *       The LLDD may retramsit the FCP_RSP iu if necessary per FC-NVME.
- *       Upon reception of FCP_CONF, or upon FCP_CONF failure, the
+ *       (described by rspbuf, rspdma, rsplen). If FCP_CONF is
+ *       supported, the LLDD is to await FCP_CONF reception to confirm
+ *       the RSP reception by the host. The LLDD may retramsit the
+ *       FCP_RSP iu if FCP_CONF is not received per FC-NVME. Upon
+ *       transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ *       or upon success/failure of FCP_CONF if it is supported, the
  *       LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
- *       consider the operation complete..
+ *       consider the operation complete.
  *       NVMET_FCOP_ABORT: the LLDD is to terminate the exchange
  *       corresponding to the fcp operation. The LLDD shall send
  *       ABTS and follow FC exchange abort-multi rules, including
  *       ABTS retries and possible logout.
  *       Upon completing the indicated operation, the LLDD is to set the
  *       status fields for the operation (tranferred_length and fcp_error
- *       status) in the request, then all the "done" routine
- *       indicated in the fcp request. Upon return from the "done"
- *       routine for either a NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation
- *       the fc-nvme layer will not longer reference the fcp request,
- *       allowing the LLDD to free/release the fcp request.
+ *       status) in the request, then call the "done" routine
+ *       indicated in the fcp request. After the operation completes,
+ *       regardless of whether the FCP_RSP iu was successfully transmit,
+ *       the LLDD-supplied exchange structure must remain valid until the
+ *       transport calls the fcp_req_release() callback to return ownership
+ *       of the exchange structure back to the LLDD so that it may be used
+ *       for another fcp command.
  *       Note: when calling the done routine for READDATA or WRITEDATA
  *       operations, the fc-nvme layer may immediate convert, in the same
  *       thread and before returning to the LLDD, the fcp operation to
@@ -802,6 +808,11 @@ struct nvmet_fc_target_port {
  *       Returns 0 on success, -<errno> on failure (Ex: -EIO)
  *       Entrypoint is Mandatory.
  *
+ * @fcp_req_release:  Called by the transport to return a nvmefc_tgt_fcp_req
+ *       to the LLDD after all operations on the fcp operation are complete.
+ *       This may be due to the command completing or upon completion of
+ *       abort cleanup.
+ *
  * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
  *       supports for cpu affinitization.
  *       Value is Mandatory. Must be at least 1.
@@ -836,7 +847,9 @@ struct nvmet_fc_target_template {
        int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
                                struct nvmefc_tgt_ls_req *tls_req);
        int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
-                               struct nvmefc_tgt_fcp_req *);
+                               struct nvmefc_tgt_fcp_req *fcpreq);
+       void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
+                               struct nvmefc_tgt_fcp_req *fcpreq);

        u32 max_hw_queues;
        u16 max_sgl_segments;
...
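
The lpfc changes above also show the ordering problem the new entrypoint creates on the abort path: the ABTS completion and the transport's fcp_req_release() call can now arrive in either order, so whichever runs last must be the one to recycle the exchange. The following is a generic sketch of that flag-under-spinlock pattern with hypothetical names (my_ctx, CTX_ABORTING, CTX_RELEASED, recycle_exchange); the lpfc code expresses the same idea with LPFC_NVMET_ABORT_OP, LPFC_NVMET_CTX_RLS and lpfc_nvmet_rq_post().

/* Hypothetical names; mirrors the lpfc flag-under-spinlock pattern above. */
static void my_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                               struct nvmefc_tgt_fcp_req *fcpreq)
{
        struct my_ctx *ctx = to_my_ctx(fcpreq);
        unsigned long flags;
        bool aborting;

        spin_lock_irqsave(&ctx->lock, flags);
        aborting = (ctx->flags & CTX_ABORTING);
        if (aborting)
                ctx->flags |= CTX_RELEASED;     /* abort completion will recycle */
        spin_unlock_irqrestore(&ctx->lock, flags);

        if (!aborting)
                recycle_exchange(ctx);          /* normal completion path */
}

static void my_abort_cmpl(struct my_ctx *ctx)
{
        unsigned long flags;
        bool released;

        spin_lock_irqsave(&ctx->lock, flags);
        released = (ctx->flags & CTX_RELEASED);
        ctx->flags &= ~CTX_ABORTING;
        spin_unlock_irqrestore(&ctx->lock, flags);

        if (released)
                recycle_exchange(ctx);          /* release already happened */
}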