Commit 38803fcf authored by James Smart's avatar James Smart Committed by Christoph Hellwig

nvme-fcloop: fix deallocation of working context

There has been a longstanding bug in LS completions, which freed ls ops —
particularly the disconnect LS — while executing on a work context that
resides in the memory being freed. Not a good thing to do.

Rework LS handling to make callbacks in the rport context rather than
the ls_request context.
Signed-off-by: default avatarJames Smart <jsmart2021@gmail.com>
Reviewed-by: default avatarHimanshu Madhani <himanshu.madhani@oracle.com>
Reviewed-by: default avatarHannes Reinecke <hare@suse.de>
Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
parent c95b708d
...@@ -198,10 +198,13 @@ struct fcloop_lport_priv { ...@@ -198,10 +198,13 @@ struct fcloop_lport_priv {
}; };
struct fcloop_rport { struct fcloop_rport {
struct nvme_fc_remote_port *remoteport; struct nvme_fc_remote_port *remoteport;
struct nvmet_fc_target_port *targetport; struct nvmet_fc_target_port *targetport;
struct fcloop_nport *nport; struct fcloop_nport *nport;
struct fcloop_lport *lport; struct fcloop_lport *lport;
spinlock_t lock;
struct list_head ls_list;
struct work_struct ls_work;
}; };
struct fcloop_tport { struct fcloop_tport {
...@@ -224,11 +227,10 @@ struct fcloop_nport { ...@@ -224,11 +227,10 @@ struct fcloop_nport {
}; };
struct fcloop_lsreq { struct fcloop_lsreq {
struct fcloop_tport *tport;
struct nvmefc_ls_req *lsreq; struct nvmefc_ls_req *lsreq;
struct work_struct work;
struct nvmefc_tgt_ls_req tgt_ls_req; struct nvmefc_tgt_ls_req tgt_ls_req;
int status; int status;
struct list_head ls_list; /* fcloop_rport->ls_list */
}; };
struct fcloop_rscn { struct fcloop_rscn {
...@@ -292,21 +294,32 @@ fcloop_delete_queue(struct nvme_fc_local_port *localport, ...@@ -292,21 +294,32 @@ fcloop_delete_queue(struct nvme_fc_local_port *localport,
{ {
} }
/*
* Transmit of LS RSP done (e.g. buffers all set). call back up
* initiator "done" flows.
*/
static void static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work) fcloop_rport_lsrqst_work(struct work_struct *work)
{ {
struct fcloop_lsreq *tls_req = struct fcloop_rport *rport =
container_of(work, struct fcloop_lsreq, work); container_of(work, struct fcloop_rport, ls_work);
struct fcloop_tport *tport = tls_req->tport; struct fcloop_lsreq *tls_req;
struct nvmefc_ls_req *lsreq = tls_req->lsreq;
if (!tport || tport->remoteport) spin_lock(&rport->lock);
lsreq->done(lsreq, tls_req->status); for (;;) {
tls_req = list_first_entry_or_null(&rport->ls_list,
struct fcloop_lsreq, ls_list);
if (!tls_req)
break;
list_del(&tls_req->ls_list);
spin_unlock(&rport->lock);
tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
/*
* callee may free memory containing tls_req.
* do not reference lsreq after this.
*/
spin_lock(&rport->lock);
}
spin_unlock(&rport->lock);
} }
static int static int
...@@ -319,17 +332,18 @@ fcloop_ls_req(struct nvme_fc_local_port *localport, ...@@ -319,17 +332,18 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
int ret = 0; int ret = 0;
tls_req->lsreq = lsreq; tls_req->lsreq = lsreq;
INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work); INIT_LIST_HEAD(&tls_req->ls_list);
if (!rport->targetport) { if (!rport->targetport) {
tls_req->status = -ECONNREFUSED; tls_req->status = -ECONNREFUSED;
tls_req->tport = NULL; spin_lock(&rport->lock);
schedule_work(&tls_req->work); list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
schedule_work(&rport->ls_work);
return ret; return ret;
} }
tls_req->status = 0; tls_req->status = 0;
tls_req->tport = rport->targetport->private;
ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req, ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
lsreq->rqstaddr, lsreq->rqstlen); lsreq->rqstaddr, lsreq->rqstlen);
...@@ -337,18 +351,28 @@ fcloop_ls_req(struct nvme_fc_local_port *localport, ...@@ -337,18 +351,28 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
} }
static int static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport, fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
struct nvmefc_tgt_ls_req *tgt_lsreq) struct nvmefc_tgt_ls_req *tgt_lsreq)
{ {
struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq); struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
struct nvmefc_ls_req *lsreq = tls_req->lsreq; struct nvmefc_ls_req *lsreq = tls_req->lsreq;
struct fcloop_tport *tport = targetport->private;
struct nvme_fc_remote_port *remoteport = tport->remoteport;
struct fcloop_rport *rport;
memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf, memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
((lsreq->rsplen < tgt_lsreq->rsplen) ? ((lsreq->rsplen < tgt_lsreq->rsplen) ?
lsreq->rsplen : tgt_lsreq->rsplen)); lsreq->rsplen : tgt_lsreq->rsplen));
tgt_lsreq->done(tgt_lsreq); tgt_lsreq->done(tgt_lsreq);
schedule_work(&tls_req->work); if (remoteport) {
rport = remoteport->private;
spin_lock(&rport->lock);
list_add_tail(&rport->ls_list, &tls_req->ls_list);
spin_unlock(&rport->lock);
schedule_work(&rport->ls_work);
}
return 0; return 0;
} }
...@@ -834,6 +858,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) ...@@ -834,6 +858,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{ {
struct fcloop_rport *rport = remoteport->private; struct fcloop_rport *rport = remoteport->private;
flush_work(&rport->ls_work);
fcloop_nport_put(rport->nport); fcloop_nport_put(rport->nport);
} }
...@@ -1136,6 +1161,9 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr, ...@@ -1136,6 +1161,9 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
rport->nport = nport; rport->nport = nport;
rport->lport = nport->lport; rport->lport = nport->lport;
nport->rport = rport; nport->rport = rport;
spin_lock_init(&rport->lock);
INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
INIT_LIST_HEAD(&rport->ls_list);
return count; return count;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment