Commit d9a9755a authored by Jens Axboe

Merge branch 'nvme-5.7' of git://git.infradead.org/nvme into block-5.7

Pull NVMe fixes from Christoph.

* 'nvme-5.7' of git://git.infradead.org/nvme:
  nvmet-rdma: fix double free of rdma queue
  nvme-fc: Revert "add module to ops template to allow module references"
  nvme: fix deadlock caused by ANA update wrong locking
  nvmet-rdma: fix bonding failover possible NULL deref
  nvmet: fix NULL dereference when removing a referral
  nvme: inherit stable pages constraint in the mpath stack device
  nvme-tcp: fix possible crash in recv error flow
  nvme-tcp: don't poll a non-live queue
  nvme-tcp: fix possible crash in write_zeroes processing
  nvmet-fc: fix typo in comment
  nvme-rdma: Replace comma with a semicolon
  nvme-fcloop: fix deallocation of working context
  nvme: fix compat address handling in several ioctls
parents d3ef5536 21f90243
@@ -6,6 +6,7 @@
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
@@ -1252,6 +1253,18 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
     queue_work(nvme_wq, &ctrl->async_event_work);
 }
 
+/*
+ * Convert integer values from ioctl structures to user pointers, silently
+ * ignoring the upper bits in the compat case to match behaviour of 32-bit
+ * kernels.
+ */
+static void __user *nvme_to_user_ptr(uintptr_t ptrval)
+{
+    if (in_compat_syscall())
+        ptrval = (compat_uptr_t)ptrval;
+    return (void __user *)ptrval;
+}
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_user_io io;
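A note on the helper above: a 32-bit task stores a 32-bit user pointer in the 64-bit addr/metadata fields of these ioctl structures, so the upper half of the value may hold stale data. A minimal user-space sketch of the truncation being applied (hypothetical values, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* A compat (32-bit) caller fills a 64-bit ioctl field with a 32-bit
     * pointer; any junk in the upper half must be ignored, which is what
     * nvme_to_user_ptr() achieves with its (compat_uptr_t) cast. */
    int main(void)
    {
        uint64_t field = 0xdeadbeef00c0ffeeULL; /* upper 32 bits: junk */
        uint32_t compat = (uint32_t)field;      /* truncated 32-bit view */

        printf("raw field:   0x%016llx\n", (unsigned long long)field);
        printf("compat view: 0x%08x\n", compat); /* prints 0x00c0ffee */
        return 0;
    }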
@@ -1275,7 +1288,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
     length = (io.nblocks + 1) << ns->lba_shift;
     meta_len = (io.nblocks + 1) * ns->ms;
-    metadata = (void __user *)(uintptr_t)io.metadata;
+    metadata = nvme_to_user_ptr(io.metadata);
if (ns->ext) {
length += meta_len;
@@ -1298,7 +1311,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
     c.rw.appmask = cpu_to_le16(io.appmask);
 
     return nvme_submit_user_cmd(ns->queue, &c,
-            (void __user *)(uintptr_t)io.addr, length,
+            nvme_to_user_ptr(io.addr), length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
@@ -1418,9 +1431,9 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
     effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
     status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-            (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-            (void __user *)(uintptr_t)cmd.metadata,
-            cmd.metadata_len, 0, &result, timeout);
+            nvme_to_user_ptr(cmd.addr), cmd.data_len,
+            nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
+            0, &result, timeout);
nvme_passthru_end(ctrl, effects);
if (status >= 0) {
@@ -1465,8 +1478,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
     effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
     status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-            (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-            (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
+            nvme_to_user_ptr(cmd.addr), cmd.data_len,
+            nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout);
nvme_passthru_end(ctrl, effects);
@@ -1884,6 +1897,13 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
     if (ns->head->disk) {
         nvme_update_disk_info(ns->head->disk, ns, id);
         blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+        if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
+            struct backing_dev_info *info =
+                ns->head->disk->queue->backing_dev_info;
+
+            info->capabilities |= BDI_CAP_STABLE_WRITES;
+        }
+
revalidate_disk(ns->head->disk);
}
#endif
......
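Background for the stable-pages hunk above: when any component queue requires stable pages (because integrity data or a digest is computed over the payload), the multipath head device must advertise BDI_CAP_STABLE_WRITES too, or user space may redirty pages while a write is in flight. A toy user-space illustration of the failure mode this prevents (hypothetical digest function, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a CRC/T10 guard tag computed before the data is sent. */
    static uint32_t digest(const uint8_t *buf, unsigned int len)
    {
        uint32_t sum = 0;

        while (len--)
            sum = sum * 31 + *buf++;
        return sum;
    }

    int main(void)
    {
        uint8_t page[16] = "stable payload";
        uint32_t before = digest(page, sizeof(page));

        page[0] = 'S'; /* page redirtied while the write is in flight */

        uint32_t after = digest(page, sizeof(page));
        printf("digest %s\n", before == after ? "matches" : "MISMATCH");
        return 0;
    }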
@@ -342,8 +342,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
         !template->ls_req || !template->fcp_io ||
         !template->ls_abort || !template->fcp_abort ||
         !template->max_hw_queues || !template->max_sgl_segments ||
-        !template->max_dif_sgl_segments || !template->dma_boundary ||
-        !template->module) {
+        !template->max_dif_sgl_segments || !template->dma_boundary) {
ret = -EINVAL;
goto out_reghost_failed;
}
@@ -2016,7 +2015,6 @@ nvme_fc_ctrl_free(struct kref *ref)
 {
     struct nvme_fc_ctrl *ctrl =
         container_of(ref, struct nvme_fc_ctrl, ref);
-    struct nvme_fc_lport *lport = ctrl->lport;
unsigned long flags;
if (ctrl->ctrl.tagset) {
@@ -2043,7 +2041,6 @@ nvme_fc_ctrl_free(struct kref *ref)
     if (ctrl->ctrl.opts)
         nvmf_free_options(ctrl->ctrl.opts);
     kfree(ctrl);
-    module_put(lport->ops->module);
}
static void
@@ -3074,15 +3071,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
         goto out_fail;
     }
 
-    if (!try_module_get(lport->ops->module)) {
-        ret = -EUNATCH;
-        goto out_free_ctrl;
-    }
-
     idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
     if (idx < 0) {
         ret = -ENOSPC;
-        goto out_mod_put;
+        goto out_free_ctrl;
}
ctrl->ctrl.opts = opts;
@@ -3232,8 +3224,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 out_free_ida:
     put_device(ctrl->dev);
     ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
-out_mod_put:
-    module_put(lport->ops->module);
out_free_ctrl:
kfree(ctrl);
out_fail:
......
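On the revert above: per the revert's rationale, holding a module reference for every live controller pinned the LLDD module, and since the module's own unload path is what would have torn those controllers down, unloading became impossible without manual intervention. A simplified user-space model of that pinning (hypothetical names; the refcount here never fails to take):

    #include <stdbool.h>
    #include <stdio.h>

    struct module_ref { int refcount; };

    static void get(struct module_ref *m)        { m->refcount++; }
    static void put(struct module_ref *m)        { m->refcount--; }
    static bool can_unload(struct module_ref *m) { return m->refcount == 0; }

    int main(void)
    {
        struct module_ref lldd = { 0 };

        get(&lldd); /* controller created: pins the LLDD module */
        printf("unload possible? %s\n", can_unload(&lldd) ? "yes" : "no");

        /* The reference only drops when the controller is freed, but
         * freeing was supposed to be driven by the unload itself. */
        put(&lldd);
        printf("unload possible? %s\n", can_unload(&lldd) ? "yes" : "no");
        return 0;
    }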
@@ -510,7 +510,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
     if (!nr_nsids)
         return 0;
 
-    down_write(&ctrl->namespaces_rwsem);
+    down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
unsigned nsid = le32_to_cpu(desc->nsids[n]);
@@ -521,7 +521,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
         if (++n == nr_nsids)
             break;
     }
-    up_write(&ctrl->namespaces_rwsem);
+    up_read(&ctrl->namespaces_rwsem);
return 0;
}
......
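On the locking change above: nvme_update_ana_state() only iterates the namespace list, so the read side of namespaces_rwsem suffices; taking it for write serialized against (and could deadlock with) paths that hold the semaphore while waiting on I/O that itself cannot complete until the ANA update finishes. A minimal pthread model of the reader side (user-space sketch, not kernel code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t namespaces_rwsem = PTHREAD_RWLOCK_INITIALIZER;

    /* Walking a shared list needs only shared (reader) access; concurrent
     * readers proceed in parallel instead of excluding one another. */
    static void update_ana_state(void)
    {
        pthread_rwlock_rdlock(&namespaces_rwsem); /* was: wrlock */
        /* ... walk namespaces, update per-namespace ANA state ... */
        pthread_rwlock_unlock(&namespaces_rwsem);
    }

    int main(void)
    {
        update_ana_state();
        puts("ok");
        return 0;
    }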
@@ -1350,7 +1350,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
     int ret;
 
     sge->addr = qe->dma;
-    sge->length = sizeof(struct nvme_command),
+    sge->length = sizeof(struct nvme_command);
sge->lkey = queue->device->pd->local_dma_lkey;
wr.next = NULL;
......
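The semicolon fix above changes no behavior: the trailing comma fused two assignments into one comma-operator expression, which still performed both stores; the semicolon just restores the intended two statements. A short demonstration of why the old form "worked":

    #include <stdio.h>

    int main(void)
    {
        int length, lkey;

        /* Assignment binds tighter than the comma operator, so this is
         * (length = 64), (lkey = 42) -- both stores happen. */
        length = 64, lkey = 42;

        printf("%d %d\n", length, lkey); /* prints: 64 42 */
        return 0;
    }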
@@ -174,16 +174,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
 {
     struct request *rq;
-    unsigned int bytes;
 
     if (unlikely(nvme_tcp_async_req(req)))
         return false; /* async events don't have a request */
 
     rq = blk_mq_rq_from_pdu(req);
-    bytes = blk_rq_payload_bytes(rq);
-    return rq_data_dir(rq) == WRITE && bytes &&
-        bytes <= nvme_tcp_inline_data_size(req->queue);
+    return rq_data_dir(rq) == WRITE && req->data_len &&
+        req->data_len <= nvme_tcp_inline_data_size(req->queue);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -1075,7 +1073,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
         if (result > 0)
             pending = true;
         else if (unlikely(result < 0))
-            break;
+            return;
if (!pending)
return;
@@ -2164,7 +2162,9 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
     c->common.flags |= NVME_CMD_SGL_METABUF;
 
-    if (rq_data_dir(rq) == WRITE && req->data_len &&
-        req->data_len <= nvme_tcp_inline_data_size(queue))
+    if (!blk_rq_nr_phys_segments(rq))
+        nvme_tcp_set_sg_null(c);
+    else if (rq_data_dir(rq) == WRITE &&
+        req->data_len <= nvme_tcp_inline_data_size(queue))
         nvme_tcp_set_sg_inline(queue, c, req->data_len);
else
@@ -2191,7 +2191,8 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
     req->data_sent = 0;
     req->pdu_len = 0;
     req->pdu_sent = 0;
-    req->data_len = blk_rq_payload_bytes(rq);
+    req->data_len = blk_rq_nr_phys_segments(rq) ?
+                blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
if (rq_data_dir(rq) == WRITE &&
@@ -2298,6 +2299,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
     struct nvme_tcp_queue *queue = hctx->driver_data;
     struct sock *sk = queue->sock->sk;
 
+    if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+        return 0;
+
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
nvme_tcp_try_recv(queue);
......
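The common thread in the tcp hunks above: a Write Zeroes command reports a nonzero length through blk_rq_payload_bytes() yet carries no physical data segments, so "does this command move data" must be keyed off the segment count; the poll fix likewise refuses to touch queues that are not live. A user-space model of the corrected data-mapping decision (names hypothetical, not the driver's):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum sgl_kind { SGL_NULL, SGL_INLINE, SGL_HOST_DATA };

    /* Mirrors the fixed nvme_tcp_map_data() logic: no physical segments
     * (e.g. Write Zeroes) means a NULL SGL, regardless of payload bytes. */
    static enum sgl_kind map_data(bool is_write, unsigned int nr_segments,
                                  unsigned int data_len, unsigned int inline_max)
    {
        if (nr_segments == 0)
            return SGL_NULL;
        if (is_write && data_len <= inline_max)
            return SGL_INLINE;
        return SGL_HOST_DATA;
    }

    int main(void)
    {
        /* Write Zeroes: bytes reported, but no segments -> NULL SGL. */
        assert(map_data(true, 0, 4096, 8192) == SGL_NULL);
        /* A small regular write is still sent inline. */
        assert(map_data(true, 1, 512, 8192) == SGL_INLINE);
        puts("ok");
        return 0;
    }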
@@ -1098,12 +1098,19 @@ static struct configfs_attribute *nvmet_referral_attrs[] = {
     NULL,
 };
 
-static void nvmet_referral_release(struct config_item *item)
+static void nvmet_referral_notify(struct config_group *group,
+        struct config_item *item)
 {
     struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
     struct nvmet_port *port = to_nvmet_port(item);
 
     nvmet_referral_disable(parent, port);
+}
+
+static void nvmet_referral_release(struct config_item *item)
+{
+    struct nvmet_port *port = to_nvmet_port(item);
+
kfree(port);
}
@@ -1134,6 +1141,7 @@ static struct config_group *nvmet_referral_make(
 
 static struct configfs_group_operations nvmet_referral_group_ops = {
     .make_group = nvmet_referral_make,
+    .disconnect_notify = nvmet_referral_notify,
};
static const struct config_item_type nvmet_referrals_type = {
......
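The referral fix above leans on configfs ordering: disconnect_notify() runs while the item is still linked, so chasing ci_parent to find the parent port is safe there; by the time release() runs that linkage is no longer usable, so release() is reduced to freeing the port. A schematic of that ordering (hypothetical structures, not the configfs API):

    #include <stdio.h>

    struct item {
        struct item *parent; /* valid during notify, not during release */
    };

    static void disconnect_notify(struct item *it)
    {
        printf("disable referral under parent %p\n", (void *)it->parent);
    }

    static void release(struct item *it)
    {
        printf("free %p\n", (void *)it); /* must not touch it->parent */
    }

    int main(void)
    {
        struct item parent = { 0 }, child = { &parent };

        disconnect_notify(&child); /* 1: still linked to the parent */
        child.parent = NULL;       /* the framework drops the linkage */
        release(&child);           /* 2: free only */
        return 0;
    }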
@@ -684,7 +684,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
     disconnect = atomic_xchg(&queue->connected, 0);
 
     spin_lock_irqsave(&queue->qlock, flags);
-    /* about outstanding io's */
+    /* abort outstanding io's */
for (i = 0; i < queue->sqsize; fod++, i++) {
if (fod->active) {
spin_lock(&fod->flock);
......
@@ -198,10 +198,13 @@ struct fcloop_lport_priv {
 };
 
 struct fcloop_rport {
-    struct nvme_fc_remote_port *remoteport;
-    struct nvmet_fc_target_port *targetport;
-    struct fcloop_nport *nport;
-    struct fcloop_lport *lport;
+    struct nvme_fc_remote_port  *remoteport;
+    struct nvmet_fc_target_port *targetport;
+    struct fcloop_nport         *nport;
+    struct fcloop_lport         *lport;
+    spinlock_t                  lock;
+    struct list_head            ls_list;
+    struct work_struct          ls_work;
};
struct fcloop_tport {
@@ -224,11 +227,10 @@ struct fcloop_nport {
 };
 
 struct fcloop_lsreq {
-    struct fcloop_tport *tport;
     struct nvmefc_ls_req *lsreq;
-    struct work_struct work;
     struct nvmefc_tgt_ls_req tgt_ls_req;
     int status;
+    struct list_head ls_list; /* fcloop_rport->ls_list */
};
struct fcloop_rscn {
@@ -292,21 +294,32 @@ fcloop_delete_queue(struct nvme_fc_local_port *localport,
 {
 }
 
 /*
  * Transmit of LS RSP done (e.g. buffers all set). call back up
  * initiator "done" flows.
  */
 static void
-fcloop_tgt_lsrqst_done_work(struct work_struct *work)
+fcloop_rport_lsrqst_work(struct work_struct *work)
 {
-    struct fcloop_lsreq *tls_req =
-        container_of(work, struct fcloop_lsreq, work);
-    struct fcloop_tport *tport = tls_req->tport;
-    struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+    struct fcloop_rport *rport =
+        container_of(work, struct fcloop_rport, ls_work);
+    struct fcloop_lsreq *tls_req;
 
-    if (!tport || tport->remoteport)
-        lsreq->done(lsreq, tls_req->status);
+    spin_lock(&rport->lock);
+    for (;;) {
+        tls_req = list_first_entry_or_null(&rport->ls_list,
+                struct fcloop_lsreq, ls_list);
+        if (!tls_req)
+            break;
+
+        list_del(&tls_req->ls_list);
+        spin_unlock(&rport->lock);
+
+        tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+        /*
+         * callee may free memory containing tls_req.
+         * do not reference lsreq after this.
+         */
+
+        spin_lock(&rport->lock);
+    }
+    spin_unlock(&rport->lock);
}
static int
@@ -319,17 +332,18 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
     int ret = 0;
 
     tls_req->lsreq = lsreq;
-    INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
+    INIT_LIST_HEAD(&tls_req->ls_list);
 
     if (!rport->targetport) {
         tls_req->status = -ECONNREFUSED;
-        tls_req->tport = NULL;
-        schedule_work(&tls_req->work);
+        spin_lock(&rport->lock);
+        list_add_tail(&rport->ls_list, &tls_req->ls_list);
+        spin_unlock(&rport->lock);
+        schedule_work(&rport->ls_work);
         return ret;
     }
 
     tls_req->status = 0;
-    tls_req->tport = rport->targetport->private;
ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
lsreq->rqstaddr, lsreq->rqstlen);
@@ -337,18 +351,28 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
 }
 
 static int
-fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
+fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
struct nvmefc_tgt_ls_req *tgt_lsreq)
{
struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+    struct fcloop_tport *tport = targetport->private;
+    struct nvme_fc_remote_port *remoteport = tport->remoteport;
+    struct fcloop_rport *rport;
memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
((lsreq->rsplen < tgt_lsreq->rsplen) ?
lsreq->rsplen : tgt_lsreq->rsplen));
tgt_lsreq->done(tgt_lsreq);
-    schedule_work(&tls_req->work);
+
+    if (remoteport) {
+        rport = remoteport->private;
+        spin_lock(&rport->lock);
+        list_add_tail(&rport->ls_list, &tls_req->ls_list);
+        spin_unlock(&rport->lock);
+        schedule_work(&rport->ls_work);
+    }
return 0;
}
@@ -834,6 +858,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 {
     struct fcloop_rport *rport = remoteport->private;
 
+    flush_work(&rport->ls_work);
fcloop_nport_put(rport->nport);
}
@@ -850,7 +875,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
 #define FCLOOP_DMABOUND_4G 0xFFFFFFFF
 
 static struct nvme_fc_port_template fctemplate = {
-    .module = THIS_MODULE,
.localport_delete = fcloop_localport_delete,
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,
@@ -1136,6 +1160,9 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
     rport->nport = nport;
     rport->lport = nport->lport;
     nport->rport = rport;
+    spin_lock_init(&rport->lock);
+    INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
+    INIT_LIST_HEAD(&rport->ls_list);
return count;
}
......
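The fcloop rework above fixes a use-after-free: the completion work item used to be embedded in the LS request, which the done() callback may free, so the work could run on freed memory. Moving the work (plus a pending list and lock) into the long-lived rport means each request is unlinked first and never touched after its done() runs. A condensed user-space model of that pattern (hypothetical types):

    #include <stdio.h>
    #include <stdlib.h>

    struct lsreq {
        struct lsreq *next;
        int status;
        void (*done)(struct lsreq *);
    };

    struct rport {
        struct lsreq *pending; /* stand-in for rport->ls_list */
    };

    /* Drain runs in rport context: unlink, then call done(), which may
     * free the request -- so it is never dereferenced afterwards. */
    static void drain(struct rport *rp)
    {
        struct lsreq *req;

        while ((req = rp->pending)) {
            rp->pending = req->next;
            req->done(req);
        }
    }

    static void done_and_free(struct lsreq *req)
    {
        printf("completed, status %d\n", req->status);
        free(req);
    }

    int main(void)
    {
        struct rport rp = { 0 };
        struct lsreq *req = calloc(1, sizeof(*req));

        req->done = done_and_free;
        rp.pending = req;
        drain(&rp);
        return 0;
    }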
@@ -78,6 +78,7 @@ enum nvmet_rdma_queue_state {
 
 struct nvmet_rdma_queue {
     struct rdma_cm_id *cm_id;
+    struct ib_qp *qp;
     struct nvmet_port *port;
struct ib_cq *cq;
atomic_t sq_wr_avail;
@@ -105,6 +106,13 @@ struct nvmet_rdma_queue {
     struct list_head queue_list;
 };
 
+struct nvmet_rdma_port {
+    struct nvmet_port *nport;
+    struct sockaddr_storage addr;
+    struct rdma_cm_id *cm_id;
+    struct delayed_work repair_work;
+};
+
struct nvmet_rdma_device {
struct ib_device *device;
struct ib_pd *pd;
@@ -467,7 +475,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
     if (ndev->srq)
         ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
     else
-        ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+        ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
if (unlikely(ret))
pr_err("post_recv cmd failed\n");
@@ -506,7 +514,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
     atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
 
     if (rsp->n_rdma) {
-        rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+        rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
queue->cm_id->port_num, rsp->req.sg,
rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
}
@@ -590,7 +598,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
     WARN_ON(rsp->n_rdma <= 0);
     atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
-    rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+    rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
queue->cm_id->port_num, rsp->req.sg,
rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
rsp->n_rdma = 0;
@@ -745,7 +753,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
     }
 
     if (nvmet_rdma_need_data_in(rsp)) {
-        if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
+        if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
@@ -917,7 +925,8 @@ static void nvmet_rdma_free_dev(struct kref *ref)
 static struct nvmet_rdma_device *
 nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
 {
-    struct nvmet_port *port = cm_id->context;
+    struct nvmet_rdma_port *port = cm_id->context;
+    struct nvmet_port *nport = port->nport;
struct nvmet_rdma_device *ndev;
int inline_page_count;
int inline_sge_count;
@@ -934,17 +943,17 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
     if (!ndev)
         goto out_err;
 
-    inline_page_count = num_pages(port->inline_data_size);
+    inline_page_count = num_pages(nport->inline_data_size);
inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
cm_id->device->attrs.max_recv_sge) - 1;
if (inline_page_count > inline_sge_count) {
pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
port->inline_data_size, cm_id->device->name,
nport->inline_data_size, cm_id->device->name,
inline_sge_count * PAGE_SIZE);
port->inline_data_size = inline_sge_count * PAGE_SIZE;
nport->inline_data_size = inline_sge_count * PAGE_SIZE;
inline_page_count = inline_sge_count;
}
ndev->inline_data_size = port->inline_data_size;
ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count;
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -1030,6 +1039,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
         pr_err("failed to create_qp ret= %d\n", ret);
         goto err_destroy_cq;
     }
+    queue->qp = queue->cm_id->qp;
atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
@@ -1058,11 +1068,10 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
 
 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
 {
-    struct ib_qp *qp = queue->cm_id->qp;
-
-    ib_drain_qp(qp);
-    rdma_destroy_id(queue->cm_id);
-    ib_destroy_qp(qp);
+    ib_drain_qp(queue->qp);
+    if (queue->cm_id)
+        rdma_destroy_id(queue->cm_id);
+    ib_destroy_qp(queue->qp);
ib_free_cq(queue->cq);
}
@@ -1272,6 +1281,7 @@ static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
 static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
         struct rdma_cm_event *event)
 {
+    struct nvmet_rdma_port *port = cm_id->context;
struct nvmet_rdma_device *ndev;
struct nvmet_rdma_queue *queue;
int ret = -EINVAL;
@@ -1287,7 +1297,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
         ret = -ENOMEM;
         goto put_device;
     }
-    queue->port = cm_id->context;
+    queue->port = port->nport;
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
@@ -1296,9 +1306,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
     ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
     if (ret) {
-        schedule_work(&queue->release_work);
-        /* Destroying rdma_cm id is not needed here */
-        return 0;
+        /*
+         * Don't destroy the cm_id in free path, as we implicitly
+         * destroy the cm_id here with non-zero ret code.
+         */
+        queue->cm_id = NULL;
+        goto free_queue;
}
mutex_lock(&nvmet_rdma_queue_mutex);
@@ -1307,6 +1320,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
     return 0;
 
+free_queue:
+    nvmet_rdma_free_queue(queue);
put_device:
kref_put(&ndev->ref, nvmet_rdma_free_dev);
@@ -1412,7 +1427,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
         struct nvmet_rdma_queue *queue)
 {
-    struct nvmet_port *port;
+    struct nvmet_rdma_port *port;
if (queue) {
/*
@@ -1431,7 +1446,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
      * cm_id destroy. use atomic xchg to make sure
      * we don't compete with remove_port.
      */
-    if (xchg(&port->priv, NULL) != cm_id)
+    if (xchg(&port->cm_id, NULL) != cm_id)
return 0;
/*
@@ -1462,6 +1477,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
         nvmet_rdma_queue_established(queue);
         break;
     case RDMA_CM_EVENT_ADDR_CHANGE:
+        if (!queue) {
+            struct nvmet_rdma_port *port = cm_id->context;
+
+            schedule_delayed_work(&port->repair_work, 0);
+            break;
+        }
+        /* FALLTHROUGH */
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
nvmet_rdma_queue_disconnect(queue);
@@ -1504,42 +1526,19 @@ static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
     mutex_unlock(&nvmet_rdma_queue_mutex);
 }
 
-static int nvmet_rdma_add_port(struct nvmet_port *port)
+static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
 {
-    struct rdma_cm_id *cm_id;
-    struct sockaddr_storage addr = { };
-    __kernel_sa_family_t af;
-    int ret;
+    struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
 
-    switch (port->disc_addr.adrfam) {
-    case NVMF_ADDR_FAMILY_IP4:
-        af = AF_INET;
-        break;
-    case NVMF_ADDR_FAMILY_IP6:
-        af = AF_INET6;
-        break;
-    default:
-        pr_err("address family %d not supported\n",
-                port->disc_addr.adrfam);
-        return -EINVAL;
-    }
-
-    if (port->inline_data_size < 0) {
-        port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
-    } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
-        pr_warn("inline_data_size %u is too large, reducing to %u\n",
-            port->inline_data_size,
-            NVMET_RDMA_MAX_INLINE_DATA_SIZE);
-        port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
-    }
+    if (cm_id)
+        rdma_destroy_id(cm_id);
+}
 
-    ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
-            port->disc_addr.trsvcid, &addr);
-    if (ret) {
-        pr_err("malformed ip/port passed: %s:%s\n",
-            port->disc_addr.traddr, port->disc_addr.trsvcid);
-        return ret;
-    }
+static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
+{
+    struct sockaddr *addr = (struct sockaddr *)&port->addr;
+    struct rdma_cm_id *cm_id;
+    int ret;
cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
RDMA_PS_TCP, IB_QPT_RC);
@@ -1558,23 +1557,19 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
         goto out_destroy_id;
     }
 
-    ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
+    ret = rdma_bind_addr(cm_id, addr);
     if (ret) {
-        pr_err("binding CM ID to %pISpcs failed (%d)\n",
-            (struct sockaddr *)&addr, ret);
+        pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
         goto out_destroy_id;
     }
 
     ret = rdma_listen(cm_id, 128);
     if (ret) {
-        pr_err("listening to %pISpcs failed (%d)\n",
-            (struct sockaddr *)&addr, ret);
+        pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
         goto out_destroy_id;
     }
 
-    pr_info("enabling port %d (%pISpcs)\n",
-        le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
-    port->priv = cm_id;
+    port->cm_id = cm_id;
     return 0;
out_destroy_id:
@@ -1582,18 +1577,92 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
     return ret;
 }
 
-static void nvmet_rdma_remove_port(struct nvmet_port *port)
+static void nvmet_rdma_repair_port_work(struct work_struct *w)
 {
-    struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
+    struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
+            struct nvmet_rdma_port, repair_work);
+    int ret;
 
-    if (cm_id)
-        rdma_destroy_id(cm_id);
+    nvmet_rdma_disable_port(port);
+    ret = nvmet_rdma_enable_port(port);
+    if (ret)
+        schedule_delayed_work(&port->repair_work, 5 * HZ);
+}
+
+static int nvmet_rdma_add_port(struct nvmet_port *nport)
+{
+    struct nvmet_rdma_port *port;
+    __kernel_sa_family_t af;
+    int ret;
+
+    port = kzalloc(sizeof(*port), GFP_KERNEL);
+    if (!port)
+        return -ENOMEM;
+
+    nport->priv = port;
+    port->nport = nport;
+    INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
+
+    switch (nport->disc_addr.adrfam) {
+    case NVMF_ADDR_FAMILY_IP4:
+        af = AF_INET;
+        break;
+    case NVMF_ADDR_FAMILY_IP6:
+        af = AF_INET6;
+        break;
+    default:
+        pr_err("address family %d not supported\n",
+            nport->disc_addr.adrfam);
+        ret = -EINVAL;
+        goto out_free_port;
+    }
+
+    if (nport->inline_data_size < 0) {
+        nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+    } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+        pr_warn("inline_data_size %u is too large, reducing to %u\n",
+            nport->inline_data_size,
+            NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+        nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+    }
+
+    ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
+            nport->disc_addr.trsvcid, &port->addr);
+    if (ret) {
+        pr_err("malformed ip/port passed: %s:%s\n",
+            nport->disc_addr.traddr, nport->disc_addr.trsvcid);
+        goto out_free_port;
+    }
+
+    ret = nvmet_rdma_enable_port(port);
+    if (ret)
+        goto out_free_port;
+
+    pr_info("enabling port %d (%pISpcs)\n",
+        le16_to_cpu(nport->disc_addr.portid),
+        (struct sockaddr *)&port->addr);
+
+    return 0;
+
+out_free_port:
+    kfree(port);
+    return ret;
+}
+
+static void nvmet_rdma_remove_port(struct nvmet_port *nport)
+{
+    struct nvmet_rdma_port *port = nport->priv;
+
+    cancel_delayed_work_sync(&port->repair_work);
+    nvmet_rdma_disable_port(port);
+    kfree(port);
}
static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
-        struct nvmet_port *port, char *traddr)
+        struct nvmet_port *nport, char *traddr)
 {
-    struct rdma_cm_id *cm_id = port->priv;
+    struct nvmet_rdma_port *port = nport->priv;
+    struct rdma_cm_id *cm_id = port->cm_id;
if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
struct nvmet_rdma_rsp *rsp =
@@ -1603,7 +1672,7 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
         sprintf(traddr, "%pISc", addr);
     } else {
-        memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+        memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
}
......
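Two idioms carry the rdma hunks above: the queue caches its own qp pointer so teardown no longer reaches through a cm_id that may already be NULL (the double-free fix), and port teardown uses xchg() so exactly one of the racing paths (device removal vs. remove_port vs. the repair work) destroys the CM ID. A user-space sketch of the xchg idiom, with the GCC __atomic builtin standing in for the kernel's xchg():

    #include <stdio.h>

    struct port { void *cm_id; };

    /* Whoever swaps the pointer out first owns the teardown; everyone
     * else sees NULL and backs off. */
    static int teardown(struct port *p, const char *who)
    {
        void *id = __atomic_exchange_n(&p->cm_id, (void *)0,
                                       __ATOMIC_SEQ_CST);

        if (!id)
            return 0;
        printf("%s destroys cm_id %p\n", who, id);
        return 1;
    }

    int main(void)
    {
        int dummy;
        struct port p = { &dummy };

        teardown(&p, "remove_port");    /* wins the race */
        teardown(&p, "device_removal"); /* sees NULL, does nothing */
        return 0;
    }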
@@ -1985,8 +1985,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 
 /* Declare and initialization an instance of the FC NVME template. */
 static struct nvme_fc_port_template lpfc_nvme_template = {
-    .module = THIS_MODULE,
-
/* initiator-based functions */
.localport_delete = lpfc_nvme_localport_delete,
.remoteport_delete = lpfc_nvme_remoteport_delete,
......
@@ -610,7 +610,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
 }
 
 static struct nvme_fc_port_template qla_nvme_fc_transport = {
-    .module = THIS_MODULE,
.localport_delete = qla_nvme_localport_delete,
.remoteport_delete = qla_nvme_remoteport_delete,
.create_queue = qla_nvme_alloc_queue,
......
@@ -270,8 +270,6 @@ struct nvme_fc_remote_port {
  *
  * Host/Initiator Transport Entrypoints/Parameters:
  *
- * @module: The LLDD module using the interface
- *
* @localport_delete: The LLDD initiates deletion of a localport via
* nvme_fc_deregister_localport(). However, the teardown is
* asynchronous. This routine is called upon the completion of the
@@ -385,8 +383,6 @@ struct nvme_fc_remote_port {
  * Value is Mandatory. Allowed to be zero.
  */
 struct nvme_fc_port_template {
-    struct module *module;
-
/* initiator-based functions */
void (*localport_delete)(struct nvme_fc_local_port *);
void (*remoteport_delete)(struct nvme_fc_remote_port *);
......