Commit 14fd1e98 authored by James Smart, committed by Jens Axboe

nvme-fc: Add Disconnect Association Rcv support

The nvme-fc host transport did not support the reception of an
FC-NVME LS. Reception is necessary for full compliance with
FC-NVME-2.

Populate the LS receive handler, specifically the handling of a
Disconnect Association LS. If the LS matches a controller, the
response must be sent only after the aborts for any I/O on any
connection of that association have been issued.
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ec3b0e3c
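
For context, a minimal, hypothetical sketch of the LLDD side of this new interface: how a driver would hand a received FC-NVME LS to the transport. The lldd_* names and the per-exchange context are illustrative assumptions; nvme_fc_rcv_ls_req() and struct nvmefc_ls_rsp are the interfaces this patch adds and uses.

#include <linux/nvme-fc-driver.h>

/* Hypothetical per-exchange context owned by the LLDD. */
struct lldd_ls_ctx {
	struct nvmefc_ls_rsp lsrsp;	/* response descriptor handed to the transport */
};

/* Hypothetical: called from the LLDD receive path with the raw LS payload. */
static void lldd_handle_nvme_ls(struct nvme_fc_remote_port *remoteport,
				struct lldd_ls_ctx *ctx,
				void *lsreqbuf, u32 lsreqbuf_len)
{
	int ret;

	/*
	 * The transport copies the payload, queues rport->lsrcv_work, and
	 * later transmits the response through the LLDD's xmt_ls_rsp().
	 */
	ret = nvme_fc_rcv_ls_req(remoteport, &ctx->lsrsp,
				 lsreqbuf, lsreqbuf_len);
	if (ret)
		/* -EINVAL/-E2BIG/-ENOMEM/-EFAULT/-ENOTCONN: LS not accepted */
		pr_warn("FC-NVME LS not accepted by transport: %d\n", ret);
}
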
@@ -62,6 +62,17 @@ struct nvmefc_ls_req_op {
bool req_queued;
};
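/*
 * Context for one LS received from a remote port: rqstbuf and rspbuf
 * live in the same allocation as the op itself (carved out in
 * nvme_fc_rcv_ls_req()), rspdma is the DMA mapping of rspbuf, and
 * "handled" keeps the rport->lsrcv_work list walk from processing an
 * op twice.
 */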
struct nvmefc_ls_rcv_op {
struct nvme_fc_rport *rport;
struct nvmefc_ls_rsp *lsrsp;
union nvmefc_ls_requests *rqstbuf;
union nvmefc_ls_responses *rspbuf;
u16 rqstdatalen;
bool handled;
dma_addr_t rspdma;
struct list_head lsrcv_list; /* rport->ls_rcv_list */
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
enum nvme_fcpop_state {
FCPOP_STATE_UNINIT = 0,
FCPOP_STATE_IDLE = 1,
@@ -118,6 +129,7 @@ struct nvme_fc_rport {
struct list_head endp_list; /* for lport->endp_list */
struct list_head ctrl_list;
struct list_head ls_req_list;
struct list_head ls_rcv_list;
struct list_head disc_list;
struct device *dev; /* physical device for dma */
struct nvme_fc_lport *lport;
@@ -125,6 +137,7 @@ struct nvme_fc_rport {
struct kref ref;
atomic_t act_ctrl_cnt;
unsigned long dev_loss_end;
struct work_struct lsrcv_work;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
/* fc_ctrl flags values - specified as bit positions */
@@ -142,6 +155,7 @@ struct nvme_fc_ctrl {
bool ioq_live;
atomic_t err_work_active;
u64 association_id;
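/* Disconnect Association LS whose response is deferred until all ABTSs are issued */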
struct nvmefc_ls_rcv_op *rcv_disconn;
struct list_head ctrl_list; /* rport->ctrl_list */
@@ -219,6 +233,9 @@ static struct device *fc_udev_device;
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
struct nvme_fc_queue *, unsigned int);
static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
static void
nvme_fc_free_lport(struct kref *ref)
{
@@ -704,6 +721,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
atomic_set(&newrec->act_ctrl_cnt, 0);
spin_lock_init(&newrec->lock);
newrec->remoteport.localport = &lport->localport;
INIT_LIST_HEAD(&newrec->ls_rcv_list);
newrec->dev = lport->dev;
newrec->lport = lport;
if (lport->ops->remote_priv_sz)
@@ -717,6 +735,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
newrec->remoteport.port_num = idx;
__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);
spin_lock_irqsave(&nvme_fc_lock, flags);
list_add_tail(&newrec->endp_list, &lport->endp_list);
@@ -1006,6 +1025,7 @@ fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
@@ -1154,6 +1174,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
struct nvmefc_ls_req *lsreq;
struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
struct fcnvme_ls_cr_assoc_acc *assoc_acc;
unsigned long flags;
int ret, fcret = 0;
lsop = kzalloc((sizeof(*lsop) +
@@ -1243,11 +1264,13 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
"q %d Create Association LS failed: %s\n",
queue->qnum, validation_errors[fcret]);
} else {
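/*
 * ctrl->lock serializes these updates against
 * nvme_fc_match_disconn_ls(), which reads association_id under the
 * same lock when a Disconnect Association LS is received.
 */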
spin_lock_irqsave(&ctrl->lock, flags);
ctrl->association_id =
be64_to_cpu(assoc_acc->associd.association_id);
queue->connection_id =
be64_to_cpu(assoc_acc->connectid.connection_id);
set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
spin_unlock_irqrestore(&ctrl->lock, flags);
}
out_free_buffer:
@@ -1428,6 +1451,247 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
kfree(lsop);
}
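/*
 * Completion of an LS response previously handed to the LLDD: unlink
 * the op from rport->ls_rcv_list, unmap the response buffer, free the
 * op, and drop the rport reference taken in nvme_fc_rcv_ls_req().
 */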
static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
struct nvme_fc_rport *rport = lsop->rport;
struct nvme_fc_lport *lport = rport->lport;
unsigned long flags;
spin_lock_irqsave(&rport->lock, flags);
list_del(&lsop->lsrcv_list);
spin_unlock_irqrestore(&rport->lock, flags);
fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
fc_dma_unmap_single(lport->dev, lsop->rspdma,
sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
kfree(lsop);
nvme_fc_rport_put(rport);
}
static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
struct nvme_fc_rport *rport = lsop->rport;
struct nvme_fc_lport *lport = rport->lport;
struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
int ret;
fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
lsop->lsrsp);
if (ret) {
dev_warn(lport->dev,
"LLDD rejected LS RSP xmt: LS %d status %d\n",
w0->ls_cmd, ret);
nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
return;
}
}
static struct nvme_fc_ctrl *
nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
struct nvmefc_ls_rcv_op *lsop)
{
struct fcnvme_ls_disconnect_assoc_rqst *rqst =
&lsop->rqstbuf->rq_dis_assoc;
struct nvme_fc_ctrl *ctrl, *ret = NULL;
struct nvmefc_ls_rcv_op *oldls = NULL;
u64 association_id = be64_to_cpu(rqst->associd.association_id);
unsigned long flags;
spin_lock_irqsave(&rport->lock, flags);
list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
if (!nvme_fc_ctrl_get(ctrl))
continue;
spin_lock(&ctrl->lock);
if (association_id == ctrl->association_id) {
oldls = ctrl->rcv_disconn;
ctrl->rcv_disconn = lsop;
ret = ctrl;
}
spin_unlock(&ctrl->lock);
if (ret)
/* leave the ctrl get reference */
break;
nvme_fc_ctrl_put(ctrl);
}
spin_unlock_irqrestore(&rport->lock, flags);
/* transmit a response for anything that was pending */
if (oldls) {
dev_info(rport->lport->dev,
"NVME-FC{%d}: Multiple Disconnect Association "
"LS's received\n", ctrl->cnum);
/* overwrite good response with bogus failure */
oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
sizeof(*oldls->rspbuf),
rqst->w0.ls_cmd,
FCNVME_RJT_RC_UNAB,
FCNVME_RJT_EXP_NONE, 0);
nvme_fc_xmt_ls_rsp(oldls);
}
return ret;
}
/*
* returns true to mean LS handled and ls_rsp can be sent
* returns false to defer ls_rsp xmt (will be done as part of
* association termination)
*/
static bool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{
struct nvme_fc_rport *rport = lsop->rport;
struct fcnvme_ls_disconnect_assoc_rqst *rqst =
&lsop->rqstbuf->rq_dis_assoc;
struct fcnvme_ls_disconnect_assoc_acc *acc =
&lsop->rspbuf->rsp_dis_assoc;
struct nvme_fc_ctrl *ctrl = NULL;
int ret = 0;
memset(acc, 0, sizeof(*acc));
ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
if (!ret) {
/* match an active association */
ctrl = nvme_fc_match_disconn_ls(rport, lsop);
if (!ctrl)
ret = VERR_NO_ASSOC;
}
if (ret) {
dev_info(rport->lport->dev,
"Disconnect LS failed: %s\n",
validation_errors[ret]);
lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
sizeof(*acc), rqst->w0.ls_cmd,
(ret == VERR_NO_ASSOC) ?
FCNVME_RJT_RC_INV_ASSOC :
FCNVME_RJT_RC_LOGIC,
FCNVME_RJT_EXP_NONE, 0);
return true;
}
/* format an ACCept response */
lsop->lsrsp->rsplen = sizeof(*acc);
nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
fcnvme_lsdesc_len(
sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
FCNVME_LS_DISCONNECT_ASSOC);
/*
* the transmit of the response will occur after the exchanges
* for the association have been ABTS'd by
* nvme_fc_delete_association().
*/
/* fail the association */
nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");
/* release the reference taken by nvme_fc_match_disconn_ls() */
nvme_fc_ctrl_put(ctrl);
return false;
}
/*
* Actual Processing routine for received FC-NVME LS Requests from the LLD
* returns true if a response should be sent afterward, false if rsp will
* be sent asynchronously.
*/
static bool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{
struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
bool ret = true;
lsop->lsrsp->nvme_fc_private = lsop;
lsop->lsrsp->rspbuf = lsop->rspbuf;
lsop->lsrsp->rspdma = lsop->rspdma;
lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
/* Be preventative. handlers will later set to valid length */
lsop->lsrsp->rsplen = 0;
/*
* handlers:
* parse request input, execute the request, and format the
* LS response
*/
switch (w0->ls_cmd) {
case FCNVME_LS_DISCONNECT_ASSOC:
ret = nvme_fc_ls_disconnect_assoc(lsop);
break;
case FCNVME_LS_DISCONNECT_CONN:
lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
sizeof(*lsop->rspbuf), w0->ls_cmd,
FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
break;
case FCNVME_LS_CREATE_ASSOCIATION:
case FCNVME_LS_CREATE_CONNECTION:
lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
sizeof(*lsop->rspbuf), w0->ls_cmd,
FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
break;
default:
lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
sizeof(*lsop->rspbuf), w0->ls_cmd,
FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
break;
}
return(ret);
}
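/*
 * Work routine scheduled by nvme_fc_rcv_ls_req(): walk
 * rport->ls_rcv_list for unhandled ops. rport->lock is dropped while
 * each op is handled, so the walk restarts from the list head every
 * time; the "handled" flag prevents reprocessing. Ops that arrive
 * while the remote port is not online are rejected with
 * FCNVME_RJT_RC_UNAB.
 */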
static void
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
{
struct nvme_fc_rport *rport =
container_of(work, struct nvme_fc_rport, lsrcv_work);
struct fcnvme_ls_rqst_w0 *w0;
struct nvmefc_ls_rcv_op *lsop;
unsigned long flags;
bool sendrsp;
restart:
sendrsp = true;
spin_lock_irqsave(&rport->lock, flags);
list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
if (lsop->handled)
continue;
lsop->handled = true;
if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
spin_unlock_irqrestore(&rport->lock, flags);
sendrsp = nvme_fc_handle_ls_rqst(lsop);
} else {
spin_unlock_irqrestore(&rport->lock, flags);
w0 = &lsop->rqstbuf->w0;
lsop->lsrsp->rsplen = nvme_fc_format_rjt(
lsop->rspbuf,
sizeof(*lsop->rspbuf),
w0->ls_cmd,
FCNVME_RJT_RC_UNAB,
FCNVME_RJT_EXP_NONE, 0);
}
if (sendrsp)
nvme_fc_xmt_ls_rsp(lsop);
goto restart;
}
spin_unlock_irqrestore(&rport->lock, flags);
}
/**
* nvme_fc_rcv_ls_req - transport entry point called by an LLDD
* upon the reception of a NVME LS request.
@@ -1454,20 +1718,92 @@ nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
{
struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
struct nvme_fc_lport *lport = rport->lport;
struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
struct nvmefc_ls_rcv_op *lsop;
unsigned long flags;
int ret;
nvme_fc_rport_get(rport);
/* validate there's a routine to transmit a response */
if (!lport->ops->xmt_ls_rsp)
return(-EINVAL);
if (!lport->ops->xmt_ls_rsp) {
dev_info(lport->dev,
"RCV %s LS failed: no LLDD xmt_ls_rsp\n",
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
ret = -EINVAL;
goto out_put;
}
if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
dev_info(lport->dev,
"RCV %s LS failed: payload too large\n",
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
ret = -E2BIG;
goto out_put;
}
lsop = kzalloc(sizeof(*lsop) +
sizeof(union nvmefc_ls_requests) +
sizeof(union nvmefc_ls_responses),
GFP_KERNEL);
if (!lsop) {
dev_info(lport->dev,
"RCV %s LS failed: No memory\n",
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
ret = -ENOMEM;
goto out_put;
}
lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];
lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
sizeof(*lsop->rspbuf),
DMA_TO_DEVICE);
if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
dev_info(lport->dev,
"RCV %s LS failed: DMA mapping failure\n",
(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
nvmefc_ls_names[w0->ls_cmd] : "");
ret = -EFAULT;
goto out_free;
}
lsop->rport = rport;
lsop->lsrsp = lsrsp;
memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
lsop->rqstdatalen = lsreqbuf_len;
spin_lock_irqsave(&rport->lock, flags);
if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
spin_unlock_irqrestore(&rport->lock, flags);
ret = -ENOTCONN;
goto out_unmap;
}
list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
spin_unlock_irqrestore(&rport->lock, flags);
schedule_work(&rport->lsrcv_work);
return 0;
out_unmap:
fc_dma_unmap_single(lport->dev, lsop->rspdma,
sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
out_free:
kfree(lsop);
out_put:
nvme_fc_rport_put(rport);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
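/*
 * For the reverse direction, a hypothetical sketch (not part of this
 * commit) of the LLDD xmt_ls_rsp() template entry this path relies on:
 * send lsrsp->rsplen bytes from the response buffer the transport has
 * already DMA-mapped, then invoke lsrsp->done() on send completion so
 * nvme_fc_xmt_ls_rsp_done() above can unmap and free the op.
 * lldd_send_ls_rsp_frame() is a made-up hardware helper.
 */

/* Hypothetical: post the response frame to hardware (stubbed here). */
static int lldd_send_ls_rsp_frame(dma_addr_t rspdma, u32 rsplen)
{
	return 0;
}

static int lldd_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			   struct nvme_fc_remote_port *remoteport,
			   struct nvmefc_ls_rsp *lsrsp)
{
	int ret;

	ret = lldd_send_ls_rsp_frame(lsrsp->rspdma, lsrsp->rsplen);
	if (ret)
		return ret;	/* transport then completes and frees the op itself */

	/*
	 * A real LLDD would call done() from its send-completion
	 * interrupt; it is shown inline here only for brevity.
	 */
	lsrsp->done(lsrsp);
	return 0;
}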
/* *********************** NVME Ctrl Routines **************************** */
static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
struct nvme_fc_fcp_op *op)
@@ -2612,6 +2948,8 @@ static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
struct nvmefc_ls_rcv_op *disls = NULL;
unsigned long flags;
int ret;
bool changed;
@@ -2729,7 +3067,13 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
out_disconnect_admin_queue:
/* send a Disconnect(association) LS to fc-nvme target */
nvme_fc_xmt_disconnect_assoc(ctrl);
spin_lock_irqsave(&ctrl->lock, flags);
ctrl->association_id = 0;
disls = ctrl->rcv_disconn;
ctrl->rcv_disconn = NULL;
spin_unlock_irqrestore(&ctrl->lock, flags);
if (disls)
nvme_fc_xmt_ls_rsp(disls);
out_delete_hw_queue:
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
@@ -2749,6 +3093,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
struct nvmefc_ls_rcv_op *disls = NULL;
unsigned long flags;
if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
@@ -2820,7 +3165,17 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
if (ctrl->association_id)
nvme_fc_xmt_disconnect_assoc(ctrl);
spin_lock_irqsave(&ctrl->lock, flags);
ctrl->association_id = 0;
disls = ctrl->rcv_disconn;
ctrl->rcv_disconn = NULL;
spin_unlock_irqrestore(&ctrl->lock, flags);
if (disls)
/*
* if a Disconnect Request was waiting for a response, send
* now that all ABTS's have been issued (and are complete).
*/
nvme_fc_xmt_ls_rsp(disls);
if (ctrl->ctrl.tagset) {
nvme_fc_delete_hw_io_queues(ctrl);
......