Commit 1abd8a8f authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Here are eight fairly small fixes collected over the last two weeks.

  Regression and crashing bug fixes:

   - mlx4/5: Fixes for issues found from various checkers

   - A resource tracking and uverbs regression in the core code

   - qedr: NULL pointer regression found during testing

   - rxe: Various small bugs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/rxe: Fix missing completion for mem_reg work requests
  RDMA/core: Save kernel caller name when creating CQ using ib_create_cq()
  IB/uverbs: Fix ordering of ucontext check in ib_uverbs_write
  IB/mlx4: Fix an error handling path in 'mlx4_ib_rereg_user_mr()'
  RDMA/qedr: Fix NULL pointer dereference when running over iWARP without RDMA-CM
  IB/mlx5: Fix return value check in flow_counters_set_data()
  IB/mlx5: Fix memory leak in mlx5_ib_create_flow
  IB/rxe: avoid double kfree skb
parents d8894a08 375dc53d
@@ -736,10 +736,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (ret) if (ret)
return ret; return ret;
if (!file->ucontext &&
(command != IB_USER_VERBS_CMD_GET_CONTEXT || extended))
return -EINVAL;
if (extended) { if (extended) {
if (count < (sizeof(hdr) + sizeof(ex_hdr))) if (count < (sizeof(hdr) + sizeof(ex_hdr)))
return -EINVAL; return -EINVAL;
@@ -759,6 +755,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out; goto out;
} }
/*
* Must be after the ib_dev check, as once the RCU clears ib_dev ==
* NULL means ucontext == NULL
*/
if (!file->ucontext &&
(command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
ret = -EINVAL;
goto out;
}
if (!verify_command_mask(ib_dev, command, extended)) { if (!verify_command_mask(ib_dev, command, extended)) {
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
goto out; goto out;
......
@@ -1562,11 +1562,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
/* Completion queues */ /* Completion queues */
struct ib_cq *ib_create_cq(struct ib_device *device, struct ib_cq *__ib_create_cq(struct ib_device *device,
ib_comp_handler comp_handler, ib_comp_handler comp_handler,
void (*event_handler)(struct ib_event *, void *), void (*event_handler)(struct ib_event *, void *),
void *cq_context, void *cq_context,
const struct ib_cq_init_attr *cq_attr) const struct ib_cq_init_attr *cq_attr,
const char *caller)
{ {
struct ib_cq *cq; struct ib_cq *cq;
@@ -1580,12 +1581,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
cq->cq_context = cq_context; cq->cq_context = cq_context;
atomic_set(&cq->usecnt, 0); atomic_set(&cq->usecnt, 0);
cq->res.type = RDMA_RESTRACK_CQ; cq->res.type = RDMA_RESTRACK_CQ;
cq->res.kern_name = caller;
rdma_restrack_add(&cq->res); rdma_restrack_add(&cq->res);
} }
return cq; return cq;
} }
EXPORT_SYMBOL(ib_create_cq); EXPORT_SYMBOL(__ib_create_cq);
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period) int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{ {
......
@@ -486,8 +486,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
} }
if (flags & IB_MR_REREG_ACCESS) { if (flags & IB_MR_REREG_ACCESS) {
if (ib_access_writable(mr_access_flags) && !mmr->umem->writable) if (ib_access_writable(mr_access_flags) &&
return -EPERM; !mmr->umem->writable) {
err = -EPERM;
goto release_mpt_entry;
}
err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry, err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
convert_access(mr_access_flags)); convert_access(mr_access_flags));
......
@@ -3199,8 +3199,8 @@ static int flow_counters_set_data(struct ib_counters *ibcounters,
if (!mcounters->hw_cntrs_hndl) { if (!mcounters->hw_cntrs_hndl) {
mcounters->hw_cntrs_hndl = mlx5_fc_create( mcounters->hw_cntrs_hndl = mlx5_fc_create(
to_mdev(ibcounters->device)->mdev, false); to_mdev(ibcounters->device)->mdev, false);
if (!mcounters->hw_cntrs_hndl) { if (IS_ERR(mcounters->hw_cntrs_hndl)) {
ret = -ENOMEM; ret = PTR_ERR(mcounters->hw_cntrs_hndl);
goto free; goto free;
} }
hw_hndl = true; hw_hndl = true;
@@ -3546,29 +3546,35 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz); err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
if (err) { if (err)
kfree(ucmd); goto free_ucmd;
return ERR_PTR(err);
}
} }
if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
return ERR_PTR(-ENOMEM); err = -ENOMEM;
goto free_ucmd;
}
if (domain != IB_FLOW_DOMAIN_USER || if (domain != IB_FLOW_DOMAIN_USER ||
flow_attr->port > dev->num_ports || flow_attr->port > dev->num_ports ||
(flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
IB_FLOW_ATTR_FLAGS_EGRESS))) IB_FLOW_ATTR_FLAGS_EGRESS))) {
return ERR_PTR(-EINVAL); err = -EINVAL;
goto free_ucmd;
}
if (is_egress && if (is_egress &&
(flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
return ERR_PTR(-EINVAL); err = -EINVAL;
goto free_ucmd;
}
dst = kzalloc(sizeof(*dst), GFP_KERNEL); dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst) if (!dst) {
return ERR_PTR(-ENOMEM); err = -ENOMEM;
goto free_ucmd;
}
mutex_lock(&dev->flow_db->lock); mutex_lock(&dev->flow_db->lock);
@@ -3637,8 +3643,8 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
unlock: unlock:
mutex_unlock(&dev->flow_db->lock); mutex_unlock(&dev->flow_db->lock);
kfree(dst); kfree(dst);
free_ucmd:
kfree(ucmd); kfree(ucmd);
kfree(handler);
return ERR_PTR(err); return ERR_PTR(err);
} }
......
@@ -1957,6 +1957,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} }
if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) { if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
if (rdma_protocol_iwarp(&dev->ibdev, 1))
return -EINVAL;
if (attr_mask & IB_QP_PATH_MTU) { if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 || if (attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > IB_MTU_4096) { attr->path_mtu > IB_MTU_4096) {
......
@@ -645,6 +645,9 @@ int rxe_requester(void *arg)
} else { } else {
goto exit; goto exit;
} }
if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
qp->sq_sig_type == IB_SIGNAL_ALL_WR)
rxe_run_task(&qp->comp.task, 1);
qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index = next_index(qp->sq.queue,
qp->req.wqe_index); qp->req.wqe_index);
goto next_wqe; goto next_wqe;
@@ -709,6 +712,7 @@ int rxe_requester(void *arg)
if (fill_packet(qp, wqe, &pkt, skb, payload)) { if (fill_packet(qp, wqe, &pkt, skb, payload)) {
pr_debug("qp#%d Error during fill packet\n", qp_num(qp)); pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
kfree_skb(skb);
goto err; goto err;
} }
@@ -740,7 +744,6 @@ int rxe_requester(void *arg)
goto next_wqe; goto next_wqe;
err: err:
kfree_skb(skb);
wqe->status = IB_WC_LOC_PROT_ERR; wqe->status = IB_WC_LOC_PROT_ERR;
wqe->state = wqe_state_error; wqe->state = wqe_state_error;
__rxe_do_task(&qp->comp.task); __rxe_do_task(&qp->comp.task);
......
@@ -3391,11 +3391,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
* *
* Users can examine the cq structure to determine the actual CQ size. * Users can examine the cq structure to determine the actual CQ size.
*/ */
struct ib_cq *ib_create_cq(struct ib_device *device, struct ib_cq *__ib_create_cq(struct ib_device *device,
ib_comp_handler comp_handler, ib_comp_handler comp_handler,
void (*event_handler)(struct ib_event *, void *), void (*event_handler)(struct ib_event *, void *),
void *cq_context, void *cq_context,
const struct ib_cq_init_attr *cq_attr); const struct ib_cq_init_attr *cq_attr,
const char *caller);
#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
/** /**
* ib_resize_cq - Modifies the capacity of the CQ. * ib_resize_cq - Modifies the capacity of the CQ.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment