Commit ef049c27 authored by Michael S. Tsirkin, committed by Linus Torvalds

[PATCH] IB/mthca: improve CQ locking part 2

Locking during the poll CQ operation can be reduced by locking the CQ
while a QP is being removed from the QP array.  This also avoids an
extra atomic reference-count operation in the polling path.
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 0b6e6a38
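The idea behind the patch: a QP may only disappear from the qp array while
the CQs it can post completions to (its send CQ and receive CQ) are locked,
so the poll path, which already holds the CQ lock, can look the QP up and
use it without taking the QP table lock or holding a reference.  A minimal
userspace model of this scheme is sketched below, with pthread mutexes
standing in for the driver's spinlocks; the names (qp_table, cq_lock,
poll_lookup, remove_qp) are hypothetical, and this is an illustration of
the locking idea, not the kernel code itself:

#include <pthread.h>
#include <stdio.h>

#define NUM_QPS 16

struct qp {
	int qpn;
};

static struct qp *qp_table[NUM_QPS];
static pthread_mutex_t qp_table_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cq_lock = PTHREAD_MUTEX_INITIALIZER; /* one CQ, for brevity */

/*
 * Poll side: the table lock is NOT taken and no reference count is
 * bumped; holding cq_lock is enough, because remove_qp() below also
 * takes cq_lock before clearing the table entry.
 */
static struct qp *poll_lookup(int qpn)
{
	struct qp *qp;

	pthread_mutex_lock(&cq_lock);
	qp = qp_table[qpn & (NUM_QPS - 1)];
	/* ... a real poller would consume this QP's CQEs here,
	 * still under cq_lock ... */
	pthread_mutex_unlock(&cq_lock);
	return qp;
}

/*
 * Removal side: lock the CQ first, then the table, mirroring the
 * ordering mthca_free_qp() uses after this patch.
 */
static void remove_qp(int qpn)
{
	pthread_mutex_lock(&cq_lock);
	pthread_mutex_lock(&qp_table_lock);
	qp_table[qpn & (NUM_QPS - 1)] = NULL;
	pthread_mutex_unlock(&qp_table_lock);
	pthread_mutex_unlock(&cq_lock);
}

int main(void)
{
	struct qp qp0 = { .qpn = 0 };

	qp_table[0] = &qp0;
	printf("lookup before remove: %p\n", (void *)poll_lookup(0));
	remove_qp(0);
	printf("lookup after remove:  %p\n", (void *)poll_lookup(0));
	return 0;
}

The trade-off is a slightly heavier destroy path, which now takes the CQ
locks, in exchange for dropping the atomic reference-count pair from the
hot completion-polling path.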
drivers/infiniband/hw/mthca/mthca_cq.c

@@ -418,14 +418,14 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
 			spin_unlock(&(*cur_qp)->lock);
 		}
 
-		spin_lock(&dev->qp_table.lock);
+		/*
+		 * We do not have to take the QP table lock here,
+		 * because CQs will be locked while QPs are removed
+		 * from the table.
+		 */
 		*cur_qp = mthca_array_get(&dev->qp_table.qp,
 					  be32_to_cpu(cqe->my_qpn) &
 					  (dev->limits.num_qps - 1));
-		if (*cur_qp)
-			atomic_inc(&(*cur_qp)->refcount);
-		spin_unlock(&dev->qp_table.lock);
 
 		if (!*cur_qp) {
 			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
 				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
@@ -537,12 +537,8 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
 		inc_cons_index(dev, cq, freed);
 	}
 
-	if (qp) {
+	if (qp)
 		spin_unlock(&qp->lock);
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
-	}
 
 	spin_unlock_irqrestore(&cq->lock, flags);
drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1083,9 +1083,21 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	return 0;
 
  err_out_free:
-	spin_lock_irq(&dev->qp_table.lock);
+	/*
+	 * Lock CQs here, so that CQ polling code can do QP lookup
+	 * without taking a lock.
+	 */
+	spin_lock_irq(&send_cq->lock);
+	if (send_cq != recv_cq)
+		spin_lock(&recv_cq->lock);
+	spin_lock(&dev->qp_table.lock);
+
 	mthca_array_clear(&dev->qp_table.qp, mqpn);
-	spin_unlock_irq(&dev->qp_table.lock);
+	spin_unlock(&dev->qp_table.lock);
+
+	if (send_cq != recv_cq)
+		spin_unlock(&recv_cq->lock);
+	spin_unlock_irq(&send_cq->lock);
 
  err_out:
 	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1100,11 +1112,28 @@ void mthca_free_qp(struct mthca_dev *dev,
 	u8 status;
 	int size;
 	int i;
+	struct mthca_cq *send_cq;
+	struct mthca_cq *recv_cq;
 
-	spin_lock_irq(&dev->qp_table.lock);
+	send_cq = to_mcq(qp->ibqp.send_cq);
+	recv_cq = to_mcq(qp->ibqp.recv_cq);
+
+	/*
+	 * Lock CQs here, so that CQ polling code can do QP lookup
+	 * without taking a lock.
+	 */
+	spin_lock_irq(&send_cq->lock);
+	if (send_cq != recv_cq)
+		spin_lock(&recv_cq->lock);
+	spin_lock(&dev->qp_table.lock);
+
 	mthca_array_clear(&dev->qp_table.qp,
 			  qp->qpn & (dev->limits.num_qps - 1));
-	spin_unlock_irq(&dev->qp_table.lock);
+	spin_unlock(&dev->qp_table.lock);
+
+	if (send_cq != recv_cq)
+		spin_unlock(&recv_cq->lock);
+	spin_unlock_irq(&send_cq->lock);
 
 	atomic_dec(&qp->refcount);
 	wait_event(qp->wait, !atomic_read(&qp->refcount));
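Both the error path in mthca_alloc_sqp() and the mthca_free_qp() path now
repeat the same sequence: take the send CQ's lock with interrupts disabled,
take the receive CQ's lock only when it is a distinct CQ (so the same
spinlock is never acquired twice), then take the qp_table lock innermost;
unlocking mirrors this in reverse.  A sketch of how that repeated sequence
could be factored into helpers (hypothetical names; these helpers are not
part of the patch):

/*
 * Hypothetical helpers, not in this patch: factor out the CQ lock
 * ordering shared by mthca_alloc_sqp()'s error path and
 * mthca_free_qp().  Callers bracket the qp_table update with
 * lock_cqs()/unlock_cqs().
 */
static void lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
	spin_lock_irq(&send_cq->lock);		/* outermost lock disables IRQs */
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);	/* skip if same CQ: no double lock */
}

static void unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);
}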