Commit 86e67a07 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/iser: Add missing counter increment in iser_data_buf_aligned_len()
  IB/ehca: Fix static rate regression
  mlx4_core: Fix state check in mlx4_qp_modify()
  IB/ipath: Normalize error return codes for posting work requests
  IB/ipath: Fix offset returned to ibv_modify_srq()
  IB/ipath: Fix error path in QP creation
  IB/ipath: Fix offset returned to ibv_resize_cq()
parents febb1877 a316b79c
@@ -1203,7 +1203,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		mqpcb->service_level = attr->ah_attr.sl;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
 
-		if (ehca_calc_ipd(shca, my_qp->init_attr.port_num,
+		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
 				  attr->ah_attr.static_rate,
 				  &mqpcb->max_static_rate)) {
 			ret = -EINVAL;
@@ -1302,7 +1302,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
 		mqpcb->service_level_al = attr->alt_ah_attr.sl;
 
-		if (ehca_calc_ipd(shca, my_qp->init_attr.port_num,
+		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
 				  attr->alt_ah_attr.static_rate,
 				  &mqpcb->max_static_rate_al)) {
 			ret = -EINVAL;
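The two ehca hunks above make internal_modify_qp() derive the static-rate inter-packet delay (IPD) from the physical port already recorded in the modify-QP control block (prim_phys_port / alt_phys_port) rather than the port number cached in init_attr at QP-creation time, so the throttling matches the port the path actually uses. A rough, self-contained sketch of what an IPD calculation does (this is not the ehca driver's ehca_calc_ipd(); the Mb/s units are an assumption for illustration):

/* Hypothetical helper: pick the smallest inter-packet delay such that
 * link_speed / (ipd + 1) does not exceed the requested static rate.
 * Feeding it the wrong port's link speed yields a wrong static rate,
 * which is the regression the hunks above fix. */
static unsigned int calc_ipd(unsigned int link_speed_mbps,
                             unsigned int static_rate_mbps)
{
        if (!static_rate_mbps || static_rate_mbps >= link_speed_mbps)
                return 0;       /* no throttling needed */

        /* round up so the effective rate stays at or below the request */
        return (link_speed_mbps + static_rate_mbps - 1) / static_rate_mbps - 1;
}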
@@ -395,12 +395,9 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 		goto bail;
 	}
 
-	/*
-	 * Return the address of the WC as the offset to mmap.
-	 * See ipath_mmap() for details.
-	 */
+	/* Check that we can write the offset to mmap. */
 	if (udata && udata->outlen >= sizeof(__u64)) {
-		__u64 offset = (__u64) wc;
+		__u64 offset = 0;
 
 		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
 		if (ret)
@@ -450,6 +447,18 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 		struct ipath_mmap_info *ip = cq->ip;
 
 		ipath_update_mmap_info(dev, ip, sz, wc);
+
+		/*
+		 * Return the offset to mmap.
+		 * See ipath_mmap() for details.
+		 */
+		if (udata && udata->outlen >= sizeof(__u64)) {
+			ret = ib_copy_to_udata(udata, &ip->offset,
+					       sizeof(ip->offset));
+			if (ret)
+				goto bail;
+		}
+
 		spin_lock_irq(&dev->pending_lock);
 		if (list_empty(&ip->pending_mmaps))
 			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
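With this change ibv_resize_cq() no longer receives the raw kernel address of the new work-completion array: a zero offset is written first (as a check that the response buffer is writable), and once ipath_update_mmap_info() has registered the new buffer, the mmap token stored in ip->offset is returned instead. A short user-space-style illustration of how such a token is typically consumed by a verbs provider (the function and variable names are illustrative, not libipathverbs code):

#include <stdint.h>
#include <sys/mman.h>

/* The __u64 the kernel writes into udata is used as the offset argument
 * of mmap() on the uverbs command fd; the driver's ->mmap handler looks
 * the token up and maps the queue pages. A raw kernel pointer would not
 * match any registered mapping (and would leak kernel addresses). */
static void *map_queue(int cmd_fd, uint64_t token, size_t size)
{
        void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, cmd_fd, (off_t) token);

        return buf == MAP_FAILED ? NULL : buf;
}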
@@ -835,7 +835,8 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 				  init_attr->qp_type);
 		if (err) {
 			ret = ERR_PTR(err);
-			goto bail_rwq;
+			vfree(qp->r_rq.wq);
+			goto bail_qp;
 		}
 		qp->ip = NULL;
 		ipath_reset_qp(qp);
@@ -863,7 +864,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 					       sizeof(offset));
 			if (err) {
 				ret = ERR_PTR(err);
-				goto bail_rwq;
+				goto bail_ip;
 			}
 		} else {
 			u32 s = sizeof(struct ipath_rwq) +
@@ -875,7 +876,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 						qp->r_rq.wq);
 			if (!qp->ip) {
 				ret = ERR_PTR(-ENOMEM);
-				goto bail_rwq;
+				goto bail_ip;
 			}
 
 			err = ib_copy_to_udata(udata, &(qp->ip->offset),
@@ -907,9 +908,11 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	goto bail;
 
 bail_ip:
-	kfree(qp->ip);
-bail_rwq:
-	vfree(qp->r_rq.wq);
+	if (qp->ip)
+		kref_put(&qp->ip->ref, ipath_release_mmap_info);
+	else
+		vfree(qp->r_rq.wq);
+	ipath_free_qp(&dev->qp_table, qp);
 bail_qp:
 	kfree(qp);
 bail_swq:
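The QP-creation error path now distinguishes two ownership states for the receive queue buffer: before ipath_create_mmap_info() succeeds it is a plain vmalloc'ed buffer and must be vfree'd directly, afterwards it belongs to the kref-counted mmap info and must be released through kref_put(), whose release callback frees it. Below is a compact, self-contained sketch of that pattern with generic names (the demo_* types and demo_release_mmap_info() are illustrative, not the ipath definitions):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Once the vmalloc'ed queue has been handed to a kref-counted mmap-info
 * object, freeing it directly would double-free when the last reference
 * is dropped; freeing only the buffer would leak the info structure. */
struct demo_mmap_info {
        struct kref ref;
        void *obj;                      /* the vmalloc'ed queue */
};

static void demo_release_mmap_info(struct kref *ref)
{
        struct demo_mmap_info *ip =
                container_of(ref, struct demo_mmap_info, ref);

        vfree(ip->obj);
        kfree(ip);
}

static void demo_cleanup(struct demo_mmap_info *ip, void *rq_buf)
{
        if (ip)
                kref_put(&ip->ref, demo_release_mmap_info); /* frees rq_buf too */
        else
                vfree(rq_buf);          /* ownership never transferred */
}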
@@ -59,7 +59,7 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 
 		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
 			*bad_wr = wr;
-			ret = -ENOMEM;
+			ret = -EINVAL;
 			goto bail;
 		}
 
@@ -211,11 +211,11 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		     struct ib_udata *udata)
 {
 	struct ipath_srq *srq = to_isrq(ibsrq);
+	struct ipath_rwq *wq;
 	int ret = 0;
 
 	if (attr_mask & IB_SRQ_MAX_WR) {
 		struct ipath_rwq *owq;
-		struct ipath_rwq *wq;
 		struct ipath_rwqe *p;
 		u32 sz, size, n, head, tail;
 
@@ -236,27 +236,20 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			goto bail;
 		}
 
-		/*
-		 * Return the address of the RWQ as the offset to mmap.
-		 * See ipath_mmap() for details.
-		 */
+		/* Check that we can write the offset to mmap. */
 		if (udata && udata->inlen >= sizeof(__u64)) {
 			__u64 offset_addr;
-			__u64 offset = (__u64) wq;
+			__u64 offset = 0;
 
 			ret = ib_copy_from_udata(&offset_addr, udata,
 						 sizeof(offset_addr));
-			if (ret) {
-				vfree(wq);
-				goto bail;
-			}
+			if (ret)
+				goto bail_free;
 			udata->outbuf = (void __user *) offset_addr;
 			ret = ib_copy_to_udata(udata, &offset,
 					       sizeof(offset));
-			if (ret) {
-				vfree(wq);
-				goto bail;
-			}
+			if (ret)
+				goto bail_free;
 		}
 
 		spin_lock_irq(&srq->rq.lock);
@@ -277,10 +270,8 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		else
 			n -= tail;
 		if (size <= n) {
-			spin_unlock_irq(&srq->rq.lock);
-			vfree(wq);
 			ret = -EINVAL;
-			goto bail;
+			goto bail_unlock;
 		}
 		n = 0;
 		p = wq->wq;
@@ -314,6 +305,18 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			u32 s = sizeof(struct ipath_rwq) + size * sz;
 
 			ipath_update_mmap_info(dev, ip, s, wq);
+
+			/*
+			 * Return the offset to mmap.
+			 * See ipath_mmap() for details.
+			 */
+			if (udata && udata->inlen >= sizeof(__u64)) {
+				ret = ib_copy_to_udata(udata, &ip->offset,
+						       sizeof(ip->offset));
+				if (ret)
+					goto bail;
+			}
+
 			spin_lock_irq(&dev->pending_lock);
 			if (list_empty(&ip->pending_mmaps))
 				list_add(&ip->pending_mmaps,
@@ -328,7 +331,12 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			srq->limit = attr->srq_limit;
 		spin_unlock_irq(&srq->rq.lock);
 	}
+	goto bail;
 
+bail_unlock:
+	spin_unlock_irq(&srq->rq.lock);
+bail_free:
+	vfree(wq);
 bail:
 	return ret;
 }
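Taken together, the ipath_modify_srq() hunks move the new receive-queue pointer to function scope, funnel every failure through the bail_unlock/bail_free labels so the queue lock is always dropped and the newly vmalloc'ed buffer is freed exactly once, and return the registered mmap token (ip->offset) to user space instead of the kernel address of the new RWQ. A stripped-down sketch of the same goto-unwind idiom under assumed generic names (demo_resize() is not the ipath code):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

/* Each failure jumps to the label that releases everything acquired so
 * far, in reverse order, so there is one unlock and one vfree() shared
 * by all error paths instead of copies scattered through the function. */
static int demo_resize(spinlock_t *lock, size_t new_size, int copy_ok,
                       int fits)
{
        void *new_wq;
        int ret = 0;

        new_wq = vmalloc(new_size);
        if (!new_wq)
                return -ENOMEM;

        if (!copy_ok) {                 /* e.g. ib_copy_from_udata() failed */
                ret = -EFAULT;
                goto bail_free;
        }

        spin_lock_irq(lock);
        if (!fits) {                    /* e.g. new ring too small for old entries */
                ret = -EINVAL;
                goto bail_unlock;
        }
        /* ... copy old entries into new_wq and publish it ... */
        spin_unlock_irq(lock);
        return 0;

bail_unlock:
        spin_unlock_irq(lock);
bail_free:
        vfree(new_wq);
        return ret;
}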
@@ -302,8 +302,10 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	next = qp->s_head + 1;
 	if (next >= qp->s_size)
 		next = 0;
-	if (next == qp->s_last)
-		goto bail_inval;
+	if (next == qp->s_last) {
+		ret = -ENOMEM;
+		goto bail;
+	}
 
 	wqe = get_swqe_ptr(qp, qp->s_head);
 	wqe->wr = *wr;
@@ -404,7 +406,7 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
 			*bad_wr = wr;
-			ret = -ENOMEM;
+			ret = -EINVAL;
 			goto bail;
 		}
 
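The post-send and post-receive hunks above, together with the earlier ipath_post_srq_receive() change, normalize the error codes returned when posting work requests: a malformed request (more scatter/gather entries than the queue was created with) now fails with -EINVAL, while a queue that is simply full reports -ENOMEM, matching the convention used elsewhere in the RDMA stack. A minimal, self-contained model of that check order (the demo_* names are illustrative, not the ipath structures):

#include <linux/errno.h>

struct demo_rq {
        unsigned int max_sge;           /* per-WR limit fixed at create time */
        unsigned int head, tail, size;  /* circular work-request ring */
};

/* Invalid parameters -> -EINVAL; no room in the ring -> -ENOMEM. */
static int demo_post(struct demo_rq *rq, unsigned int num_sge)
{
        unsigned int next;

        if (num_sge > rq->max_sge)
                return -EINVAL;         /* caller's request is malformed */

        next = rq->head + 1;
        if (next >= rq->size)
                next = 0;
        if (next == rq->tail)
                return -ENOMEM;         /* ring full; retry after completions */

        /* ... copy the work request into the ring ... */
        rq->head = next;
        return 0;
}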
@@ -310,13 +310,15 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 		if (i + 1 < data->dma_nents) {
 			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
 			/* are i, i+1 fragments of the same page? */
-			if (end_addr == next_addr)
+			if (end_addr == next_addr) {
+				cnt++;
 				continue;
-			else if (!IS_4K_ALIGNED(end_addr)) {
+			} else if (!IS_4K_ALIGNED(end_addr)) {
 				ret_len = cnt + 1;
 				break;
 			}
 		}
+		cnt++;
 	}
 	if (i == data->dma_nents)
 		ret_len = cnt;	/* loop ended */
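iser_data_buf_aligned_len() walks the DMA-mapped scatterlist and counts how many leading entries form one contiguous, 4K-aligned region that can be registered as a single memory region; the missing increment meant entries merged with their successor (the continue branch) were never counted, under-reporting the aligned length. A user-space style model of the fixed counting logic, written so it can be tested outside the kernel (the struct and the 4K mask are illustrative):

#include <stddef.h>
#include <stdint.h>

#define DEMO_4K_MASK 0xFFFULL

struct demo_seg {
        uint64_t addr;
        uint64_t len;
};

/* Count how many leading segments can be merged into one region: a
 * segment is counted when it ends exactly where the next one starts
 * (the increment the fix adds) or when the loop reaches it normally;
 * a misaligned boundary stops the scan after counting the segment
 * that ends there. */
static unsigned int aligned_len(const struct demo_seg *seg, size_t nents)
{
        unsigned int cnt = 0;
        size_t i;

        for (i = 0; i < nents; i++) {
                uint64_t end_addr = seg[i].addr + seg[i].len;

                if (i + 1 < nents) {
                        if (end_addr == seg[i + 1].addr) {
                                cnt++;          /* merged with its successor */
                                continue;
                        } else if (end_addr & DEMO_4K_MASK) {
                                return cnt + 1; /* misaligned gap ends the region */
                        }
                }
                cnt++;
        }
        return cnt;
}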
@@ -113,7 +113,7 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	struct mlx4_cmd_mailbox *mailbox;
 	int ret = 0;
 
-	if (cur_state >= MLX4_QP_NUM_STATE || cur_state >= MLX4_QP_NUM_STATE ||
+	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
 	    !op[cur_state][new_state])
 		return -EINVAL;
 
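The mlx4 fix corrects a copy-and-paste slip: the guard tested cur_state twice and never new_state, even though both values index the op[][] table of transition commands, so an out-of-range new state could read past the table. A tiny sketch of the corrected guard around a 2-D transition table (demo enum and table, not the mlx4 definitions):

#include <errno.h>

enum demo_qp_state { DEMO_RST, DEMO_INIT, DEMO_RTR, DEMO_RTS, DEMO_NUM_STATE };

/* 0 means "no command defined for this transition". */
static const int demo_op[DEMO_NUM_STATE][DEMO_NUM_STATE] = {
        [DEMO_RST]  = { [DEMO_INIT] = 1 },
        [DEMO_INIT] = { [DEMO_RTR]  = 2 },
        [DEMO_RTR]  = { [DEMO_RTS]  = 3 },
};

static int demo_qp_modify(unsigned int cur_state, unsigned int new_state)
{
        /* Validate both indices before touching the table; checking
         * cur_state twice left new_state completely unchecked. */
        if (cur_state >= DEMO_NUM_STATE || new_state >= DEMO_NUM_STATE ||
            !demo_op[cur_state][new_state])
                return -EINVAL;

        /* ... issue demo_op[cur_state][new_state] to the device ... */
        return 0;
}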