Commit 1ea09758 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB/cm: Drain cq in ipoib_cm_dev_stop()
  IPoIB/cm: Fix timeout check in ipoib_cm_dev_stop()
  IB/ehca: Fix number of send WRs reported for new QP
  IB/mlx4: Initialize send queue entry ownership bits
  IB/mlx4: Don't allocate RQ doorbell if using SRQ
parents a41d7f00 2dfbfc37
drivers/infiniband/hw/ehca/hcp_if.c

@@ -331,7 +331,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 				 0);
 	qp->ipz_qp_handle.handle = outs[0];
 	qp->real_qp_num = (u32)outs[1];
-	parms->act_nr_send_sges =
+	parms->act_nr_send_wqes =
 		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
 	parms->act_nr_recv_wqes =
 		(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
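The ehca change is a one-word fix: the actual number of send work requests granted by the hypervisor was being stored into act_nr_send_sges (the scatter/gather entry count) instead of act_nr_send_wqes, so QP creators saw a bogus send-queue depth. EHCA_BMASK_GET() pulls a bit-field out of one of the 64-bit hypercall output words; a minimal sketch of that extraction pattern, with hypothetical shift/width values rather than the real ehca mask encoding:

    #include <stdint.h>

    /* Hypothetical layout: assume the granted send-WR count sits in
     * bits 32..47 of the output word (NOT the real ehca encoding). */
    #define OUTST_SEND_WR_SHIFT 32
    #define OUTST_SEND_WR_WIDTH 16

    static inline uint16_t bmask_get(uint64_t word, unsigned shift, unsigned width)
    {
            return (uint16_t)((word >> shift) & ((1ULL << width) - 1));
    }

    /* e.g.: act_nr_send_wqes = bmask_get(outs[2], OUTST_SEND_WR_SHIFT,
     *                                    OUTST_SEND_WR_WIDTH); */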
drivers/infiniband/hw/mlx4/qp.c

@@ -270,9 +270,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
 {
-	struct mlx4_wqe_ctrl_seg *ctrl;
 	int err;
-	int i;
 
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
@@ -319,20 +317,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 
-		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
-					  ucmd.db_addr, &qp->db);
-		if (err)
-			goto err_mtt;
+		if (!init_attr->srq) {
+			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
+						  ucmd.db_addr, &qp->db);
+			if (err)
+				goto err_mtt;
+		}
 	} else {
 		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
 
-		err = mlx4_ib_db_alloc(dev, &qp->db, 0);
-		if (err)
-			goto err;
-
-		*qp->db.db = 0;
+		if (!init_attr->srq) {
+			err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+			if (err)
+				goto err;
+
+			*qp->db.db = 0;
+		}
 
 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
 			err = -ENOMEM;
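Why this matters: a QP attached to a shared receive queue (SRQ) posts all its receives through the SRQ and has no receive ring of its own, so it never needs an RQ doorbell record. The hunk above skips mapping the user doorbell (or allocating a kernel one) when init_attr->srq is set; the error-path and destroy-path hunks below add the matching !srq guards so teardown only frees what was actually set up. A hedged sketch of that allocate/free symmetry, with invented names:

    #include <stdlib.h>

    /* Illustrative only: conditional setup must be mirrored by
     * conditional teardown, or we free a doorbell that was never
     * allocated (or leak one that was). */
    struct fake_qp {
            int has_db;     /* true only when a doorbell record exists */
            long *db;
    };

    static int fake_qp_init(struct fake_qp *qp, int using_srq)
    {
            qp->has_db = !using_srq;
            if (qp->has_db) {
                    qp->db = calloc(1, sizeof(*qp->db));
                    if (!qp->db)
                            return -1;
            }
            return 0;
    }

    static void fake_qp_destroy(struct fake_qp *qp)
    {
            if (qp->has_db)         /* same guard as fake_qp_init() */
                    free(qp->db);
    }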
@@ -348,11 +350,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 
-	for (i = 0; i < qp->sq.max; ++i) {
-		ctrl = get_send_wqe(qp, i);
-		ctrl->owner_opcode = cpu_to_be32(1 << 31);
-	}
-
 	qp->sq.wrid  = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL);
 	qp->rq.wrid  = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL);
@@ -386,7 +383,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	return 0;
 
 err_wrid:
-	if (pd->uobject)
+	if (pd->uobject && !init_attr->srq)
 		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
 	else {
 		kfree(qp->sq.wrid);
@@ -403,7 +400,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
 
 err_db:
-	if (!pd->uobject)
+	if (!pd->uobject && !init_attr->srq)
 		mlx4_ib_db_free(dev, &qp->db);
 
 err:
@@ -481,6 +478,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 	if (is_user) {
-		mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
-				      &qp->db);
+		if (!qp->ibqp.srq)
+			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
+					      &qp->db);
 		ib_umem_release(qp->umem);
@@ -488,6 +486,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 		kfree(qp->sq.wrid);
 		kfree(qp->rq.wrid);
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
-		mlx4_ib_db_free(dev, &qp->db);
+		if (!qp->ibqp.srq)
+			mlx4_ib_db_free(dev, &qp->db);
 	}
 }
@@ -852,7 +851,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (ibqp->srq)
 		context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
 
-	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+	if (!ibqp->srq && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
 
 	if (cur_state == IB_QPS_INIT &&
@@ -872,6 +871,21 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	else
 		sqd_event = 0;
 
+	/*
+	 * Before passing a kernel QP to the HW, make sure that the
+	 * ownership bits of the send queue are set so that the
+	 * hardware doesn't start processing stale work requests.
+	 */
+	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+		struct mlx4_wqe_ctrl_seg *ctrl;
+		int i;
+
+		for (i = 0; i < qp->sq.max; ++i) {
+			ctrl = get_send_wqe(qp, i);
+			ctrl->owner_opcode = cpu_to_be32(1 << 31);
+		}
+	}
+
 	err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
 			     to_mlx4_state(new_state), context, optpar,
 			     sqd_event, &qp->mqp);
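The for loop deleted from create_qp_common() earlier reappears here, run on every RESET-to-INIT transition of a kernel QP instead of only at creation time. Bit 31 of owner_opcode is the mlx4 WQE ownership bit; stamping it across the whole send queue before mlx4_qp_modify() hands the QP to the HCA keeps the hardware from interpreting leftover buffer contents as posted work requests, and also covers a QP that is reset and reused. A hedged sketch of the stamping idea with generic types, not the real mlx4_wqe_ctrl_seg:

    #include <stdint.h>
    #include <arpa/inet.h>      /* htonl(): the descriptor is big-endian */

    struct fake_ctrl_seg {
            uint32_t owner_opcode;  /* bit 31 = ownership flag */
    };

    static void stamp_sq(struct fake_ctrl_seg *sq, int nwqes)
    {
            int i;

            /* Mark every entry so the hardware treats it as stale
             * rather than as a valid, already-posted request. */
            for (i = 0; i < nwqes; ++i)
                    sq[i].owner_opcode = htonl(1U << 31);
    }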
@@ -919,6 +933,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		qp->rq.tail = 0;
 		qp->sq.head = 0;
 		qp->sq.tail = 0;
-		*qp->db.db = 0;
+		if (!ibqp->srq)
+			*qp->db.db = 0;
 	}
drivers/infiniband/ulp/ipoib/ipoib.h

@@ -429,6 +429,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
 void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
+void ipoib_drain_cq(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
drivers/infiniband/ulp/ipoib/ipoib_cm.c

@@ -713,7 +713,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
 	while (!list_empty(&priv->cm.rx_error_list) ||
 	       !list_empty(&priv->cm.rx_flush_list) ||
 	       !list_empty(&priv->cm.rx_drain_list)) {
-		if (!time_after(jiffies, begin + 5 * HZ)) {
+		if (time_after(jiffies, begin + 5 * HZ)) {
 			ipoib_warn(priv, "RX drain timing out\n");
 
 			/*
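A one-character logic fix: with the old !time_after(), the timeout branch ran on every pass before the 5-second deadline (warning spuriously) and never once the deadline had actually passed, so the forced cleanup it guards was unreachable. time_after(a, b) is the kernel's wraparound-safe "a is later than b" comparison on jiffies; a minimal userspace rendering of the corrected loop shape, with stand-in names since jiffies and HZ do not exist outside the kernel:

    #include <stdint.h>

    #define TICKS_PER_SEC 100   /* stand-in for HZ */

    /* Wrap-safe "a is after b", modeled on the kernel's time_after(). */
    static int ticks_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(b - a) < 0;
    }

    /* corrected pattern:
     *
     *      begin = now();
     *      while (!lists_empty()) {
     *              if (ticks_after(now(), begin + 5 * TICKS_PER_SEC)) {
     *                      warn("RX drain timing out");
     *                      force_clean_lists();    // reachable only after the deadline
     *              }
     *              sleep_a_little();
     *      }
     */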
@@ -726,6 +726,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
 		}
 
 		spin_unlock_irq(&priv->lock);
 		msleep(1);
+		ipoib_drain_cq(dev);
 		spin_lock_irq(&priv->lock);
 	}
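The added ipoib_drain_cq() call (defined in the ipoib_ib.c hunk below) closes a livelock: this loop waits for connected-mode receive queues to leave the error/flush/drain lists, but they only leave those lists when their flush completions are reaped from the CQ, and with polling disabled during shutdown nothing else reaps them. Draining the CQ on each pass, with priv->lock dropped, lets the completion handlers run so the loop condition can eventually become false.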
drivers/infiniband/ulp/ipoib/ipoib_ib.c

@@ -550,13 +550,30 @@ static int recvs_pending(struct net_device *dev)
 	return pending;
 }
 
+void ipoib_drain_cq(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	int i, n;
+
+	do {
+		n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
+		for (i = 0; i < n; ++i) {
+			if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
+				ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
+			else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
+				ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
+			else
+				ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
+		}
+	} while (n == IPOIB_NUM_WC);
+}
+
 int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
 	unsigned long begin;
 	struct ipoib_tx_buf *tx_req;
-	int i, n;
+	int i;
 
 	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
 	netif_poll_disable(dev);
@@ -611,17 +628,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 			goto timeout;
 		}
 
-		do {
-			n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
-			for (i = 0; i < n; ++i) {
-				if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
-					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
-				else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
-					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
-				else
-					ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
-			}
-		} while (n == IPOIB_NUM_WC);
+		ipoib_drain_cq(dev);
 
 		msleep(1);
 	}
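ipoib_drain_cq() is exactly the loop that used to live inline here, factored out so ipoib_cm_dev_stop() can reuse it. It polls up to IPOIB_NUM_WC completions per ib_poll_cq() call and dispatches each one by the flag bits encoded in wr_id (CM SRQ receive, datagram receive, or send); a full batch means more completions may be pending, so it repeats until a short batch comes back. A hedged sketch of that batch-drain shape with invented callback types, not the ib_ verbs API:

    /* Illustrative batch-drain loop. poll() fills wc[] with at most
     * BATCH completions and returns how many it wrote; a short batch
     * means the queue is empty for now. */
    #define BATCH 4

    struct fake_wc { unsigned long wr_id; };

    static void drain(int (*poll)(struct fake_wc *wc, int batch),
                      void (*dispatch)(struct fake_wc *wc))
    {
            struct fake_wc wc[BATCH];
            int i, n;

            do {
                    n = poll(wc, BATCH);
                    for (i = 0; i < n; ++i)
                            dispatch(&wc[i]);
            } while (n == BATCH);   /* full batch: poll again */
    }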