Commit 28a0ea77 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "This fixes one major regression with NFS and mlx4 due to the max_sg
  rework in this merge window, tidies a few minor error_path
  regressions, and various small fixes.

  The HFI1 driver is broken this cycle due to a regression caused by a
  PCI change; it looks like Bjorn will merge a fix for this. Also, the
  lingering ipoib issue I mentioned earlier remains unfixed.

  Summary:

   - Fix possible FD type confusion crash

   - Fix a user-triggerable crash in cxgb4

   - Fix bad handling of IOMMU resources causing a user-controlled leak
     in bnxt

   - Add missing locking in ipoib to fix a rare 'stuck tx' situation

   - Add missing locking in cma

   - Add two missing uverbs cleanups on failure paths, regressions from
     this merge window

   - Fix a regression from this merge window that caused RDMA NFS to not
     work with the mlx4 driver due to the max_sg changes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/mlx4: Ensure that maximal send/receive SGE less than supported by HW
  RDMA/cma: Protect cma dev list with lock
  RDMA/uverbs: Fix error cleanup path of ib_uverbs_add_one()
  bnxt_re: Fix couple of memory leaks that could lead to IOMMU call traces
  IB/ipoib: Avoid a race condition between start_xmit and cm_rep_handler
  iw_cxgb4: only allow 1 flush on user qps
  IB/core: Release object lock if destroy failed
  RDMA/ucma: check fd type in ucma_migrate_id()
parents 11da3a7f 8f28b178

--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 	dgid = (union ib_gid *) &addr->sib_addr;
 	pkey = ntohs(addr->sib_pkey);
 
+	mutex_lock(&lock);
 	list_for_each_entry(cur_dev, &dev_list, list) {
 		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
 			if (!rdma_cap_af_ib(cur_dev->device, p))
@@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
 					cma_dev = cur_dev;
 					sgid = gid;
 					id_priv->id.port_num = p;
+					goto found;
 				}
 			}
 		}
 	}
-
-	if (!cma_dev)
-		return -ENODEV;
+	mutex_unlock(&lock);
+	return -ENODEV;
 
 found:
 	cma_attach_to_dev(id_priv, cma_dev);
-	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
-	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+	mutex_unlock(&lock);
+	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
 	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
 	return 0;
 }
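
For readers outside the RDMA core, the shape of the cma change above is simply "walk the shared device list only while holding the lock, and release the lock on both exits". A minimal userspace sketch of that pattern with pthreads; the names here are illustrative, not the kernel's:

/* Sketch only: search a mutex-protected list and release the lock on both
 * the not-found return and the found path, mirroring the cma change above. */
#include <pthread.h>
#include <stddef.h>

struct dev_node {
	int id;
	struct dev_node *next;
};

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev_node *dev_list;		/* protected by dev_lock */

static int attach_to_dev(int wanted_id, struct dev_node **out)
{
	struct dev_node *cur;

	pthread_mutex_lock(&dev_lock);
	for (cur = dev_list; cur; cur = cur->next) {
		if (cur->id == wanted_id)
			goto found;
	}
	pthread_mutex_unlock(&dev_lock);	/* not-found exit */
	return -1;

found:
	*out = cur;		/* "attach" while the list is still stable */
	pthread_mutex_unlock(&dev_lock);	/* found exit */
	return 0;
}

int main(void)
{
	struct dev_node a = { .id = 7, .next = NULL };
	struct dev_node *found = NULL;

	dev_list = &a;
	return (attach_to_dev(7, &found) == 0 && found == &a) ? 0 : 1;
}

As in the patch, the "attach" happens before the unlock, so the chosen entry cannot be removed from the list underneath the caller.
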
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
 		WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
 		if (!uverbs_destroy_uobject(obj, reason))
 			ret = 0;
+		else
+			atomic_set(&obj->usecnt, 0);
 	}
 	return ret;
 }
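
The rdma_core.c hunk follows a "claim exclusively, and give the claim back if destruction fails" rule, so a later cleanup pass can retry instead of finding the object permanently write-locked. A standalone sketch of that idea with C11 atomics; the names and the 0/-1 convention here are illustrative, not the uverbs internals:

/* Sketch: exclusive-claim cleanup that drops the claim again when destroy
 * fails, so a retry remains possible. Illustrative names and conventions. */
#include <stdatomic.h>
#include <stdbool.h>

struct uobject {
	atomic_int usecnt;	/* 0 = idle, >0 = readers, -1 = claimed for destroy */
};

static bool try_claim_for_destroy(struct uobject *obj)
{
	int idle = 0;

	return atomic_compare_exchange_strong(&obj->usecnt, &idle, -1);
}

static int cleanup_one(struct uobject *obj, int (*destroy)(struct uobject *))
{
	int ret;

	if (!try_claim_for_destroy(obj))
		return -1;	/* someone still uses it; try again later */

	ret = destroy(obj);
	if (ret)
		/* destroy failed: release the claim so the next pass can retry */
		atomic_store(&obj->usecnt, 0);
	return ret;
}

static int failing_destroy(struct uobject *obj) { (void)obj; return -1; }

int main(void)
{
	struct uobject obj = { .usecnt = 0 };

	(void)cleanup_one(&obj, failing_destroy);
	/* after a failed destroy the object must be claimable again */
	return try_claim_for_destroy(&obj) ? 0 : 1;
}
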
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
 static DEFINE_IDR(ctx_idr);
 static DEFINE_IDR(multicast_idr);
 
+static const struct file_operations ucma_fops;
+
 static inline struct ucma_context *_ucma_find_context(int id,
 						       struct ucma_file *file)
 {
@@ -1581,6 +1583,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
 	f = fdget(cmd.fd);
 	if (!f.file)
 		return -ENOENT;
+	if (f.file->f_op != &ucma_fops) {
+		ret = -EINVAL;
+		goto file_put;
+	}
 
 	/* Validate current fd and prevent destruction of id. */
 	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
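
The ucma hunks are the standard guard against fd type confusion: before trusting f.file->private_data, the handler now checks that the descriptor was opened on this driver at all by comparing its f_op with ucma's own file_operations (hence the forward declaration added near the top of the file). Without the check, ucma_migrate_id() would interpret another driver's private_data as a struct ucma_file, which is the FD-type-confusion crash called out in the pull message. The same discipline applies to any API that accepts a handle and downcasts an opaque pointer; a small illustrative sketch with made-up names:

/* Sketch: verify a handle's kind before downcasting its private data,
 * analogous to the f_op check added in ucma_migrate_id(). Names are made up. */
#include <stddef.h>

enum handle_kind { HANDLE_UCMA_LIKE, HANDLE_OTHER };

struct handle {
	enum handle_kind kind;	/* plays the role of file->f_op */
	void *private_data;	/* only meaningful for HANDLE_UCMA_LIKE */
};

struct ctx {
	int id;
};

static struct ctx *get_ctx_checked(const struct handle *h)
{
	if (!h || h->kind != HANDLE_UCMA_LIKE)
		return NULL;	/* the kernel path returns -EINVAL here */
	return (struct ctx *)h->private_data;
}

int main(void)
{
	struct ctx c = { .id = 42 };
	struct handle good = { HANDLE_UCMA_LIKE, &c };
	struct handle bad  = { HANDLE_OTHER, &c };

	return (get_ctx_checked(&good) == &c &&
		get_ctx_checked(&bad) == NULL) ? 0 : 1;
}
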
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -1050,7 +1050,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
 	uverbs_dev->num_comp_vectors = device->num_comp_vectors;
 
 	if (ib_uverbs_create_uapi(device, uverbs_dev))
-		goto err;
+		goto err_uapi;
 
 	cdev_init(&uverbs_dev->cdev, NULL);
 	uverbs_dev->cdev.owner = THIS_MODULE;
@@ -1077,11 +1077,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
 
 err_class:
 	device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-
 err_cdev:
 	cdev_del(&uverbs_dev->cdev);
+err_uapi:
 	clear_bit(devnum, dev_map);
-
 err:
 	if (atomic_dec_and_test(&uverbs_dev->refcount))
 		ib_uverbs_comp_dev(uverbs_dev);
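
The uverbs_main.c change is the usual C error-unwinding discipline: every failure jumps to a label that undoes exactly the steps completed so far, so the new step (uapi creation) needs its own label placed so that clear_bit(devnum, dev_map) still runs. A generic, self-contained sketch of that layout with illustrative names:

/* Sketch: goto-based unwinding where each label undoes one earlier step.
 * Jumping to the wrong label (err instead of err_uapi) would leak the
 * devnum reservation, which is what the patch fixes. Names are illustrative. */
#include <stdio.h>

static int reserve_devnum(void)   { return 0; }
static void release_devnum(void)  { puts("devnum released"); }
static int create_uapi(void)      { return -1; /* pretend this step fails */ }
static int register_cdev(void)    { return 0; }
static void unregister_cdev(void) { puts("cdev unregistered"); }

static int add_one(void)
{
	int ret;

	ret = reserve_devnum();
	if (ret)
		goto err;

	ret = create_uapi();
	if (ret)
		goto err_uapi;	/* must still release the devnum */

	ret = register_cdev();
	if (ret)
		goto err_cdev;

	return 0;

err_cdev:
	unregister_cdev();
err_uapi:
	release_devnum();
err:
	return ret;
}

int main(void)
{
	return add_one() ? 1 : 0;
}

Run as is, the sketch prints only "devnum released": the failing create_uapi step unwinds the devnum reservation but never touches the cdev that was never registered.
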
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 				"Failed to destroy Shadow QP");
 			return rc;
 		}
+		bnxt_qplib_free_qp_res(&rdev->qplib_res,
+				       &rdev->qp1_sqp->qplib_qp);
 		mutex_lock(&rdev->qp_lock);
 		list_del(&rdev->qp1_sqp->list);
 		atomic_dec(&rdev->qp_count);
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
 				       struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_q *rq = &qp->rq;
-	struct bnxt_qplib_q *sq = &qp->rq;
+	struct bnxt_qplib_q *sq = &qp->sq;
 	int rc = 0;
 
 	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp)
 	schp = to_c4iw_cq(qhp->ibqp.send_cq);
 
 	if (qhp->ibqp.uobject) {
+
+		/* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+		if (qhp->wq.flushed)
+			return;
+
+		qhp->wq.flushed = 1;
 		t4_set_wq_in_error(&qhp->wq, 0);
 		t4_set_cq_in_error(&rchp->cq);
 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
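
The cxgb4 change is a "flush at most once" guard for user QPs: the added comment states that qhp->wq.flushed is protected by qhp->mutex, so testing and setting the flag before the hardware flush makes any second call a no-op. A tiny self-contained sketch of the idiom, with hypothetical names and a pthread mutex standing in for the QP mutex:

/* Sketch: flush a queue at most once; the "flushed" flag is protected by the
 * same mutex the callers already hold around flush_queue(). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t mutex;	/* held by callers around flush_queue() */
	bool flushed;
};

static void do_flush_hw(struct queue *q)
{
	(void)q;
	puts("flushing once");
}

/* Caller must hold q->mutex, mirroring the qhp->mutex comment in the patch. */
static void flush_queue(struct queue *q)
{
	if (q->flushed)
		return;
	q->flushed = true;
	do_flush_hw(q);
}

int main(void)
{
	struct queue q = { .mutex = PTHREAD_MUTEX_INITIALIZER, .flushed = false };

	pthread_mutex_lock(&q.mutex);
	flush_queue(&q);
	flush_queue(&q);	/* second call is a no-op */
	pthread_mutex_unlock(&q.mutex);
	return 0;
}
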
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -517,8 +517,10 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->page_size_cap = dev->dev->caps.page_size_cap;
 	props->max_qp = dev->dev->quotas.qp;
 	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
-	props->max_send_sge = dev->dev->caps.max_sq_sg;
-	props->max_recv_sge = dev->dev->caps.max_rq_sg;
+	props->max_send_sge =
+		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+	props->max_recv_sge =
+		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
 	props->max_sge_rd = MLX4_MAX_SGE_RD;
 	props->max_cq = dev->dev->quotas.cq;
 	props->max_cqe = dev->dev->caps.max_cqes;
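
The mlx4 hunk addresses the headline NFS regression: after the merge-window max_sg rework split the advertised limit into max_send_sge and max_recv_sge, mlx4 now reports the conservative min() of the send-queue and receive-queue hardware caps for both fields, so that (per the patch title) the advertised values never exceed what the hardware supports. As a toy illustration of the clamp, with made-up numbers:

/* Toy illustration: never advertise more SGEs than the smaller of the two
 * hardware limits. The values are made up. */
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int max_sq_sg = 62;	/* hypothetical send-queue SGE limit */
	int max_rq_sg = 30;	/* hypothetical receive-queue SGE limit */
	int advertised = min_int(max_sq_sg, max_rq_sg);

	printf("max_send_sge = max_recv_sge = %d\n", advertised);
	return 0;
}
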
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
 
 	skb_queue_head_init(&skqueue);
 
+	netif_tx_lock_bh(p->dev);
 	spin_lock_irq(&priv->lock);
 	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
 	if (p->neigh)
 		while ((skb = __skb_dequeue(&p->neigh->queue)))
 			__skb_queue_tail(&skqueue, skb);
 	spin_unlock_irq(&priv->lock);
+	netif_tx_unlock_bh(p->dev);
 
 	while ((skb = __skb_dequeue(&skqueue))) {
 		skb->dev = p->dev;
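
The ipoib hunk makes the REP handler take the device TX lock around the flag flip and the drain of the per-neighbour queue, so the transmit path (which runs under that same TX lock) can no longer slip a packet into the queue between the drain and the moment it observes IPOIB_FLAG_OPER_UP; such a stranded packet is the rare "stuck tx" from the pull message. A reduced userspace sketch of the locking shape, with pthread mutexes and illustrative names:

/* Sketch of the locking shape: the handler takes the same outer lock as the
 * transmit path before flipping the "connected" flag and draining the pending
 * queue, so nothing can be queued between the drain and the flag change. */
#include <pthread.h>
#include <stdbool.h>

struct pending_queue {
	int pkts[64];
	int n;
};

static pthread_mutex_t tx_lock   = PTHREAD_MUTEX_INITIALIZER;	/* netif_tx_lock_bh analogue */
static pthread_mutex_t priv_lock = PTHREAD_MUTEX_INITIALIZER;	/* priv->lock analogue */
static bool oper_up;
static struct pending_queue pending;

static void hw_send(int pkt) { (void)pkt; }

static void xmit(int pkt)		/* like start_xmit: runs under tx_lock */
{
	pthread_mutex_lock(&tx_lock);
	if (oper_up)
		hw_send(pkt);
	else if (pending.n < 64)
		pending.pkts[pending.n++] = pkt;	/* parked until connected */
	pthread_mutex_unlock(&tx_lock);
}

static void rep_handler(void)		/* connection established */
{
	pthread_mutex_lock(&tx_lock);	/* the lock the patch adds */
	pthread_mutex_lock(&priv_lock);
	oper_up = true;
	while (pending.n > 0)		/* drain everything queued so far */
		hw_send(pending.pkts[--pending.n]);
	pthread_mutex_unlock(&priv_lock);
	pthread_mutex_unlock(&tx_lock);
}

int main(void)
{
	xmit(1);		/* parked: connection not up yet */
	rep_handler();		/* takes tx_lock, flips the flag, drains */
	xmit(2);		/* now sent directly */
	return pending.n;	/* 0: nothing left stuck in the queue */
}

The real handler only moves the packets to a local queue under the locks and transmits them after unlocking; the sketch collapses that detail for brevity.
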