Commit edb98d16 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "A couple of regressions were found in rc1, as well as another set of
  races and bugs:

   - A regression where RDMA_CM_EVENT_REJECTED was lost in some cases

   - Bad error handling in the CM, uverbs, rvt, siw and i40iw

   - Kernel stack memory leak to user space in mlx4

   - Missing data in a uapi query for mlx5

   - Three races found by syzkaller in the ib core code"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/core: Fix race between destroy and release FD object
  IB/rdmavt: Always return ERR_PTR from rvt_create_mmap_info()
  RDMA/core: Fix overwriting of uobj in case of error
  RDMA/core: Prevent mixed use of FDs between shared ufiles
  RDMA/uverbs: Fix a race with disassociate and exit_mmap()
  RDMA/mlx5: Set GRH fields in query QP on RoCE
  RDMA/siw: Fix potential siw_mem refcnt leak in siw_fastreg_mr()
  RDMA/mlx4: Initialize ib_spec on the stack
  RDMA/cm: Fix an error check in cm_alloc_id_priv()
  RDMA/cm: Fix missing RDMA_CM_EVENT_REJECTED event after receiving REJ message
  i40iw: fix null pointer dereference on a null wqe pointer
parents 3f777e19 f0abc761
@@ -862,7 +862,7 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
 	ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
				   &cm.local_id_next, GFP_KERNEL);
-	if (ret)
+	if (ret < 0)
 		goto error;
 	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
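
The xa_alloc_cyclic_* helpers return 1 (not 0) on success when the cyclic counter wraps, so `if (ret)` mistakenly treated that successful allocation as a failure; only negative values are errors. A minimal userspace sketch of the same pattern, using a hypothetical alloc_id_cyclic() stand-in rather than the real XArray API:

#include <stdio.h>

/* Hypothetical stand-in for a cyclic ID allocator: negative errno on
 * failure, 0 on plain success, 1 on success after the counter wrapped
 * (mirroring the xa_alloc_cyclic return convention). */
static int alloc_id_cyclic(unsigned int *id, unsigned int *next)
{
	*id = (*next)++;
	if (*next == 0)		/* wrapped around */
		return 1;
	return 0;
}

int main(void)
{
	unsigned int id, next = 0xffffffff;
	int ret = alloc_id_cyclic(&id, &next);

	if (ret)	/* BUG: treats the "wrapped" success (ret == 1) as an error */
		printf("wrongly rejected id %u (ret=%d)\n", id, ret);

	if (ret < 0)	/* correct: only negative return values are failures */
		printf("allocation failed\n");
	else
		printf("allocated id %u\n", id);
	return 0;
}
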
@@ -1828,11 +1828,9 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
 static void cm_format_rej(struct cm_rej_msg *rej_msg,
			   struct cm_id_private *cm_id_priv,
-			  enum ib_cm_rej_reason reason,
-			  void *ari,
-			  u8 ari_length,
-			  const void *private_data,
-			  u8 private_data_len)
+			  enum ib_cm_rej_reason reason, void *ari,
+			  u8 ari_length, const void *private_data,
+			  u8 private_data_len, enum ib_cm_state state)
 {
 	lockdep_assert_held(&cm_id_priv->lock);
@@ -1840,7 +1838,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
 	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		 be32_to_cpu(cm_id_priv->id.remote_id));
-	switch(cm_id_priv->id.state) {
+	switch (state) {
 	case IB_CM_REQ_RCVD:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
@@ -1905,8 +1903,9 @@ static void cm_dup_req_handler(struct cm_work *work,
			      cm_id_priv->private_data_len);
		break;
 	case IB_CM_TIMEWAIT:
-		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
-			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
+		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
+			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
+			      IB_CM_TIMEWAIT);
		break;
 	default:
		goto unlock;
@@ -2904,6 +2903,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			       u8 ari_length, const void *private_data,
			       u8 private_data_len)
 {
+	enum ib_cm_state state = cm_id_priv->id.state;
 	struct ib_mad_send_buf *msg;
 	int ret;
@@ -2913,7 +2913,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
	     (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;
-	switch (cm_id_priv->id.state) {
+	switch (state) {
 	case IB_CM_REQ_SENT:
 	case IB_CM_MRA_REQ_RCVD:
 	case IB_CM_REQ_RCVD:
@@ -2925,7 +2925,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
		if (ret)
			return ret;
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
-			      ari, ari_length, private_data, private_data_len);
+			      ari, ari_length, private_data, private_data_len,
+			      state);
		break;
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
@@ -2934,7 +2935,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
		if (ret)
			return ret;
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
-			      ari, ari_length, private_data, private_data_len);
+			      ari, ari_length, private_data, private_data_len,
+			      state);
		break;
 	default:
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
......
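
The point of these cm.c hunks is that cm_send_rej_locked() moves the id to a new state (idle or timewait) before it formats the REJ, so cm_format_rej() used to read the already-updated cm_id_priv->id.state and format the wrong message type, which is why the peer could miss RDMA_CM_EVENT_REJECTED. Capturing the state once and passing that snapshot down keeps the decision and the formatting consistent. A rough userspace sketch of the snapshot idea, with a toy connection struct instead of the real CM structures:

#include <stdio.h>

enum conn_state { CONN_IDLE, CONN_REQ_RCVD, CONN_REP_SENT };

struct conn {
	enum conn_state state;	/* normally protected by a lock */
};

/* Format the rejection from a caller-provided snapshot, not from
 * conn->state, which may already have been moved on. */
static void format_rej(enum conn_state state)
{
	switch (state) {
	case CONN_REQ_RCVD:
		printf("REJ formatted as a response to a REQ\n");
		break;
	case CONN_REP_SENT:
		printf("REJ formatted as a response to a REP\n");
		break;
	default:
		printf("REJ formatted with no message to reject\n");
		break;
	}
}

static void send_rej_locked(struct conn *c)
{
	enum conn_state state = c->state;	/* snapshot before any transition */

	c->state = CONN_IDLE;			/* state change, as cm_reset_to_idle() does */
	format_rej(state);			/* still reflects the original state */
}

int main(void)
{
	struct conn c = { .state = CONN_REQ_RCVD };

	send_rej_locked(&c);
	return 0;
}
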
@@ -360,7 +360,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
	 * uverbs_uobject_fd_release(), and the caller is expected to ensure
	 * that release is never done while a call to lookup is possible.
	 */
-	if (f->f_op != fd_type->fops) {
+	if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
		fput(f);
		return ERR_PTR(-EBADF);
	}
@@ -474,16 +474,15 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
 	filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
				   fd_type->flags);
 	if (IS_ERR(filp)) {
+		uverbs_uobject_put(uobj);
		uobj = ERR_CAST(filp);
-		goto err_uobj;
+		goto err_fd;
 	}
 	uobj->object = filp;
 	uobj->id = new_fd;
 	return uobj;
-err_uobj:
-	uverbs_uobject_put(uobj);
 err_fd:
	put_unused_fd(new_fd);
	return uobj;
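
The bug here is that `uobj = ERR_CAST(filp)` overwrites the only pointer to the freshly allocated uobject before the old err_uobj path called uverbs_uobject_put() on it, so the put operated on the error pointer instead of the object. The fix drops the reference first and only then reuses the variable for the error return. A simplified userspace illustration of the ordering, with a toy refcount rather than the uverbs API:

#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;
};

static struct object *object_get(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (obj)
		obj->refcount = 1;
	return obj;
}

static void object_put(struct object *obj)
{
	if (--obj->refcount == 0)
		free(obj);
}

static struct object *setup(int fail)
{
	struct object *obj = object_get();

	if (!obj)
		return NULL;

	if (fail) {
		/* Drop the reference while 'obj' still points at the object;
		 * only afterwards reuse the variable to carry the failure
		 * (in the kernel code: uobj = ERR_CAST(filp)). */
		object_put(obj);
		obj = NULL;
		return obj;
	}
	return obj;
}

int main(void)
{
	struct object *obj = setup(1);

	printf("setup %s\n", obj ? "succeeded" : "failed without leaking");
	return 0;
}
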
@@ -679,7 +678,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
			      enum rdma_lookup_mode mode)
 {
 	assert_uverbs_usecnt(uobj, mode);
-	uobj->uapi_object->type_class->lookup_put(uobj, mode);
	/*
	 * In order to unlock an object, either decrease its usecnt for
	 * read access or zero it in case of exclusive access. See
@@ -696,6 +694,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
		break;
 	}
+	uobj->uapi_object->type_class->lookup_put(uobj, mode);
 	/* Pairs with the kref obtained by type->lookup_get */
 	uverbs_uobject_put(uobj);
 }
......
@@ -820,6 +820,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
			ret = mmget_not_zero(mm);
			if (!ret) {
				list_del_init(&priv->list);
+				if (priv->entry) {
+					rdma_user_mmap_entry_put(priv->entry);
+					priv->entry = NULL;
+				}
				mm = NULL;
				continue;
			}
......
@@ -1046,7 +1046,7 @@ i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
 	u64 header;
 	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
-	if (wqe)
+	if (!wqe)
		return I40IW_ERR_RING_FULL;
 	set_64bit_val(wqe, 32, feat_mem->pa);
......
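
The original test was inverted: it returned I40IW_ERR_RING_FULL when a WQE was actually available and fell through to dereference a NULL pointer when the ring really was full. A minimal sketch of the intended guard-clause shape, with toy types standing in for the i40iw structures:

#include <stdio.h>
#include <stdint.h>

/* Toy stand-in: returns NULL when the send ring has no free entry. */
static uint64_t *ring_get_next_wqe(int ring_full)
{
	static uint64_t wqe[8];

	return ring_full ? NULL : wqe;
}

static int post_query(int ring_full)
{
	uint64_t *wqe = ring_get_next_wqe(ring_full);

	if (!wqe)		/* bail out before touching the pointer */
		return -1;	/* "ring full" */

	wqe[4] = 0xfeedULL;	/* safe: wqe is known to be non-NULL here */
	return 0;
}

int main(void)
{
	printf("ring full: %d\n", post_query(1));
	printf("ring free: %d\n", post_query(0));
	return 0;
}
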
@@ -1499,8 +1499,9 @@ static int __mlx4_ib_create_default_rules(
 	int i;
 	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+		union ib_flow_spec ib_spec = {};
		int ret;
-		union ib_flow_spec ib_spec;
		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
......
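
Declaring ib_spec with "= {}" zero-fills the whole union, so padding and any members the switch does not set cannot carry stale kernel stack bytes onward; this is the "kernel stack memory leak to user space" item from the pull message. A small userspace demonstration of the difference, using a hypothetical struct rather than ib_flow_spec:

#include <stdio.h>

/* Hypothetical example structure with padding and rarely-set members. */
struct spec {
	char type;
	/* typically 3 bytes of padding here */
	int value;
	char reserved[16];	/* often left untouched by callers */
};

static void fill_partially(struct spec *s)
{
	s->type = 1;
	s->value = 42;
	/* s->reserved and the padding bytes are not written */
}

int main(void)
{
	struct spec uninit;			/* stale stack contents */
	struct spec zeroed = { 0 };		/* the kernel spells this "= {}" */

	fill_partially(&uninit);
	fill_partially(&zeroed);

	/* Only 'zeroed' is guaranteed to contain no leftover stack data in
	 * its padding and unused members; 'uninit' may expose whatever was
	 * on the stack if it is ever copied out as-is. */
	printf("zeroed.reserved[0] = %d (always 0)\n", zeroed.reserved[0]);
	printf("uninit.reserved[0] = %d (indeterminate)\n", uninit.reserved[0]);
	return 0;
}
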
@@ -5558,7 +5558,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
 	rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
 	rdma_ah_set_static_rate(ah_attr,
				 path->static_rate ? path->static_rate - 5 : 0);
-	if (path->grh_mlid & (1 << 7)) {
+	if (path->grh_mlid & (1 << 7) ||
+	    ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
		rdma_ah_set_grh(ah_attr, NULL,
......
@@ -248,8 +248,8 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
		cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
-		if (!cq->ip) {
-			err = -ENOMEM;
+		if (IS_ERR(cq->ip)) {
+			err = PTR_ERR(cq->ip);
			goto bail_wc;
		}
......
@@ -154,7 +154,7 @@ int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
  * @udata: user data (must be valid!)
  * @obj: opaque pointer to a cq, wq etc
  *
- * Return: rvt_mmap struct on success
+ * Return: rvt_mmap struct on success, ERR_PTR on failure
  */
 struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
					     struct ib_udata *udata, void *obj)
@@ -166,7 +166,7 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
 	ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
 	if (!ip)
-		return ip;
+		return ERR_PTR(-ENOMEM);
 	size = PAGE_ALIGN(size);
......
@@ -1244,8 +1244,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			qp->ip = rvt_create_mmap_info(rdi, s, udata,
						      qp->r_rq.wq);
-			if (!qp->ip) {
-				ret = ERR_PTR(-ENOMEM);
+			if (IS_ERR(qp->ip)) {
+				ret = ERR_CAST(qp->ip);
				goto bail_qpn;
			}
......
@@ -111,8 +111,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
		srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
-		if (!srq->ip) {
-			ret = -ENOMEM;
+		if (IS_ERR(srq->ip)) {
+			ret = PTR_ERR(srq->ip);
			goto bail_wq;
		}
......
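
Across these rdmavt hunks, rvt_create_mmap_info() switches to the kernel's ERR_PTR convention, and every caller (cq, qp, srq) replaces its NULL check with IS_ERR()/PTR_ERR()/ERR_CAST(), so the original errno is propagated instead of being collapsed to -ENOMEM. A compact userspace approximation of that convention, with simplified stand-ins for the helpers from <linux/err.h>:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

/* Simplified userspace stand-ins for the kernel's ERR_PTR helpers. */
static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-4095;
}

struct mmap_info {
	unsigned int size;
};

/* Failure is reported as an encoded errno, never as NULL. */
static struct mmap_info *create_mmap_info(unsigned int size, int simulate_oom)
{
	struct mmap_info *ip;

	if (simulate_oom)
		return ERR_PTR(-ENOMEM);

	ip = malloc(sizeof(*ip));
	if (!ip)
		return ERR_PTR(-ENOMEM);
	ip->size = size;
	return ip;
}

int main(void)
{
	struct mmap_info *ip = create_mmap_info(4096, 1);

	if (IS_ERR(ip)) {		/* callers check IS_ERR(), not NULL */
		printf("create failed: %ld\n", PTR_ERR(ip));
		return 1;
	}
	printf("created, size %u\n", ip->size);
	free(ip);
	return 0;
}
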
@@ -920,20 +920,27 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
 {
 	struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
 	struct siw_device *sdev = to_siw_dev(pd->device);
-	struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
+	struct siw_mem *mem;
 	int rv = 0;
 	siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);
-	if (unlikely(!mem || !base_mr)) {
+	if (unlikely(!base_mr)) {
		pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
		return -EINVAL;
	}
 	if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
		pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
-		rv = -EINVAL;
-		goto out;
+		return -EINVAL;
	}
+	mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
+	if (unlikely(!mem)) {
+		pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
+		return -EINVAL;
+	}
 	if (unlikely(mem->pd != pd)) {
		pr_warn("siw: fastreg: PD mismatch\n");
		rv = -EINVAL;
......
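
The leak came from taking the siw_mem reference (siw_mem_id2obj() returns the object with an elevated refcount) before the base_mr sanity checks, whose early returns then skipped the matching put. The reordered code performs the cheap checks first and only then looks the object up, so every taken reference is paired with a release. A small sketch of that discipline with a toy refcounted table, not the siw internals:

#include <stdio.h>

struct mem {
	int refcount;
};

static struct mem table[1] = { { .refcount = 1 } };

/* Lookup takes a reference that the caller must eventually drop. */
static struct mem *mem_lookup(int id)
{
	if (id != 0)
		return NULL;
	table[0].refcount++;
	return &table[0];
}

static void mem_put(struct mem *m)
{
	m->refcount--;
}

static int fastreg(int id, int valid_mr)
{
	struct mem *m;

	/* Do the cheap validation first: failing here must not leak a
	 * reference, so nothing has been looked up yet. */
	if (!valid_mr)
		return -1;

	m = mem_lookup(id);
	if (!m)
		return -1;

	/* ... use m ... */

	mem_put(m);		/* every successful lookup is paired with a put */
	return 0;
}

int main(void)
{
	fastreg(0, 0);		/* early failure: no reference taken */
	fastreg(0, 1);		/* success: reference taken and released */
	printf("refcount after both calls: %d (expected 1)\n",
	       table[0].refcount);
	return 0;
}
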