Commit 0e44dc38 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mthca: FMR ioremap fix
  IPoIB: Free child interfaces properly
  IB/mthca: Fix race in reference counting
  IB/srp: Fix tracking of pending requests during error handling
  IB: Fix display of 4-bit port counters in sysfs
parents 6314410d ce477ae4
@@ -336,7 +336,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
 	switch (width) {
 	case 4:
 		ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >>
-			    (offset % 4)) & 0xf);
+			    (4 - (offset % 8))) & 0xf);
 		break;
 	case 8:
 		ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]);
......
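The sysfs hunk above reads a 4-bit PortCounters field: two counters share each byte of the PMA MAD payload, high nibble first, so a counter at bit offset "offset" lives in byte 40 + offset / 8 and must be shifted right by 4 - (offset % 8), i.e. by 4 for the first nibble and 0 for the second. The old shift of offset % 4 is always zero for nibble-aligned offsets, so the first counter in each byte was read from the wrong nibble. A minimal standalone sketch of the corrected extraction (the helper name and plain-C form are illustrative, not part of the patch):

#include <linux/types.h>

/* Illustrative helper, not from the patch: extract a 4-bit counter that
 * starts at the given bit offset within the PMA counter block. */
static unsigned int pma_get_4bit_counter(const u8 *mad_data, unsigned int offset)
{
	/* offset is a multiple of 4; offset % 8 is 0 for the high nibble
	 * and 4 for the low nibble of byte 40 + offset / 8. */
	return (mad_data[40 + offset / 8] >> (4 - (offset % 8))) & 0xf;
}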
@@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 	spin_lock(&dev->cq_table.lock);
 	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
 	if (cq)
-		atomic_inc(&cq->refcount);
+		++cq->refcount;
 	spin_unlock(&dev->cq_table.lock);

 	if (!cq) {
@@ -254,8 +254,10 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 	if (cq->ibcq.event_handler)
 		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);

-	if (atomic_dec_and_test(&cq->refcount))
+	spin_lock(&dev->cq_table.lock);
+	if (!--cq->refcount)
 		wake_up(&cq->wait);
+	spin_unlock(&dev->cq_table.lock);
 }

 static inline int is_recv_cqe(struct mthca_cqe *cqe)
@@ -267,23 +269,13 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe)
 	return !(cqe->is_send & 0x80);
 }

-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 		    struct mthca_srq *srq)
 {
-	struct mthca_cq *cq;
 	struct mthca_cqe *cqe;
 	u32 prod_index;
 	int nfreed = 0;

-	spin_lock_irq(&dev->cq_table.lock);
-	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
-	if (cq)
-		atomic_inc(&cq->refcount);
-	spin_unlock_irq(&dev->cq_table.lock);
-
-	if (!cq)
-		return;
-
 	spin_lock_irq(&cq->lock);

 	/*
@@ -301,7 +293,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 	if (0)
 		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
-			  qpn, cqn, cq->cons_index, prod_index);
+			  qpn, cq->cqn, cq->cons_index, prod_index);

 	/*
 	 * Now sweep backwards through the CQ, removing CQ entries
@@ -325,8 +317,6 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 	}

 	spin_unlock_irq(&cq->lock);
-	if (atomic_dec_and_test(&cq->refcount))
-		wake_up(&cq->wait);
 }

 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
@@ -821,7 +811,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	}

 	spin_lock_init(&cq->lock);
-	atomic_set(&cq->refcount, 1);
+	cq->refcount = 1;
 	init_waitqueue_head(&cq->wait);

 	memset(cq_context, 0, sizeof *cq_context);
@@ -896,6 +886,17 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	return err;
 }

+static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
+{
+	int c;
+
+	spin_lock_irq(&dev->cq_table.lock);
+	c = cq->refcount;
+	spin_unlock_irq(&dev->cq_table.lock);
+
+	return c;
+}
+
 void mthca_free_cq(struct mthca_dev *dev,
 		   struct mthca_cq *cq)
 {
@@ -929,6 +930,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 	spin_lock_irq(&dev->cq_table.lock);
 	mthca_array_clear(&dev->cq_table.cq,
 			  cq->cqn & (dev->limits.num_cqs - 1));
+	--cq->refcount;
 	spin_unlock_irq(&dev->cq_table.lock);

 	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
@@ -936,8 +938,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 	else
 		synchronize_irq(dev->pdev->irq);

-	atomic_dec(&cq->refcount);
-	wait_event(cq->wait, !atomic_read(&cq->refcount));
+	wait_event(cq->wait, !get_cq_refcount(dev, cq));

 	if (cq->is_kernel) {
 		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
......
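The mthca_cq.c hunks above are the core of the reference-counting race fix: the atomic_t refcount becomes a plain int that is only touched while cq_table.lock is held, so checking the table pointer and taking or dropping a reference happen as one step under the lock, and the destroy path drops the table's reference inside the same critical section that clears the pointer before sleeping until all other references are gone. A minimal sketch of the pattern with hypothetical names (not the driver's actual structures; the put path is assumed to run from interrupt context, as the event handlers do, and the destroy path from process context):

#include <linux/spinlock.h>
#include <linux/wait.h>

struct obj {
	int			refcount;	/* protected by table->lock */
	wait_queue_head_t	wait;
};

struct obj_table {
	spinlock_t lock;
	/* lookup structure omitted */
};

/* Called from interrupt context after a lookup that took a reference. */
static void obj_put(struct obj_table *table, struct obj *obj)
{
	spin_lock(&table->lock);
	if (!--obj->refcount)
		wake_up(&obj->wait);
	spin_unlock(&table->lock);
}

static int obj_refcount(struct obj_table *table, struct obj *obj)
{
	int c;

	spin_lock_irq(&table->lock);
	c = obj->refcount;
	spin_unlock_irq(&table->lock);

	return c;
}

/* Destroy: drop the table's reference while removing the pointer, then wait. */
static void obj_destroy(struct obj_table *table, struct obj *obj)
{
	spin_lock_irq(&table->lock);
	/* remove obj from the lookup structure here */
	--obj->refcount;
	spin_unlock_irq(&table->lock);

	wait_event(obj->wait, !obj_refcount(table, obj));
	/* no other CPU holds a reference now; obj can be freed */
}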
@@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev,
 void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
 void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
 		    enum ib_event_type event_type);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 		    struct mthca_srq *srq);
 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
......
@@ -761,6 +761,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
 int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 {
+	unsigned long addr;
 	int err, i;

 	err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
@@ -796,9 +797,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 		goto err_fmr_mpt;
 	}

+	addr = pci_resource_start(dev->pdev, 4) +
+		((pci_resource_len(dev->pdev, 4) - 1) &
+		 dev->mr_table.mpt_base);
+
 	dev->mr_table.tavor_fmr.mpt_base =
-		ioremap(dev->mr_table.mpt_base,
-			(1 << i) * sizeof (struct mthca_mpt_entry));
+		ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry));

 	if (!dev->mr_table.tavor_fmr.mpt_base) {
 		mthca_warn(dev, "MPT ioremap for FMR failed.\n");
@@ -806,9 +810,12 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev)
 		goto err_fmr_mpt;
 	}

+	addr = pci_resource_start(dev->pdev, 4) +
+		((pci_resource_len(dev->pdev, 4) - 1) &
+		 dev->mr_table.mtt_base);
+
 	dev->mr_table.tavor_fmr.mtt_base =
-		ioremap(dev->mr_table.mtt_base,
-			(1 << i) * MTHCA_MTT_SEG_SIZE);
+		ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE);

 	if (!dev->mr_table.tavor_fmr.mtt_base) {
 		mthca_warn(dev, "MTT ioremap for FMR failed.\n");
 		err = -ENOMEM;
......
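The FMR fix above accounts for mpt_base and mtt_base being device-relative addresses: ioremap() needs a CPU physical address, so the patch masks the table base into the length of PCI BAR 4 and adds the BAR's start. A sketch of that calculation as a hypothetical helper (not from the patch; it assumes, as the mask trick requires, that the BAR length is a power of two so the mask yields the offset within the BAR):

#include <linux/pci.h>
#include <linux/io.h>

/* Hypothetical helper: map "size" bytes of an HCA table whose base is a
 * device address, through BAR 4 of the given PCI device. */
static void __iomem *map_table_through_bar4(struct pci_dev *pdev, u64 base,
					    size_t size)
{
	unsigned long addr = pci_resource_start(pdev, 4) +
		((pci_resource_len(pdev, 4) - 1) & base);

	return ioremap(addr, size);
}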
@@ -139,11 +139,12 @@ struct mthca_ah {
  * a qp may be locked, with the send cq locked first.  No other
  * nesting should be done.
  *
- * Each struct mthca_cq/qp also has an atomic_t ref count.  The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
+ * Each struct mthca_cq/qp also has an ref count, protected by the
+ * corresponding table lock.  The pointer from the cq/qp_table to the
+ * struct counts as one reference.  This reference also is good for
+ * access through the consumer API, so modifying the CQ/QP etc doesn't
+ * need to take another reference.  Access to a QP because of a
+ * completion being polled does not need a reference either.
  *
  * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
  * destroy function to sleep on.
@@ -159,8 +160,9 @@ struct mthca_ah {
  * - decrement ref count; if zero, wake up waiters
  *
  * To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
+ * - lock cq/qp_table
+ * - remove pointer and decrement ref count
+ * - unlock cq/qp_table lock
  * - wait_event until ref count is zero
  *
  * It is the consumer's responsibilty to make sure that no QP
@@ -197,7 +199,7 @@ struct mthca_cq_resize {
 struct mthca_cq {
 	struct ib_cq		ibcq;
 	spinlock_t		lock;
-	atomic_t		refcount;
+	int			refcount;
 	int			cqn;
 	u32			cons_index;
 	struct mthca_cq_buf	buf;
@@ -217,7 +219,7 @@ struct mthca_cq {
 struct mthca_srq {
 	struct ib_srq		ibsrq;
 	spinlock_t		lock;
-	atomic_t		refcount;
+	int			refcount;
 	int			srqn;
 	int			max;
 	int			max_gs;
@@ -254,7 +256,7 @@ struct mthca_wq {
 struct mthca_qp {
 	struct ib_qp		ibqp;
-	atomic_t		refcount;
+	int			refcount;
 	u32			qpn;
 	int			is_direct;
 	u8			port; /* for SQP and memfree use only */
......
@@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 	spin_lock(&dev->qp_table.lock);
 	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
 	if (qp)
-		atomic_inc(&qp->refcount);
+		++qp->refcount;
 	spin_unlock(&dev->qp_table.lock);

 	if (!qp) {
@@ -257,8 +257,10 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
 	if (qp->ibqp.event_handler)
 		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

-	if (atomic_dec_and_test(&qp->refcount))
+	spin_lock(&dev->qp_table.lock);
+	if (!--qp->refcount)
 		wake_up(&qp->wait);
+	spin_unlock(&dev->qp_table.lock);
 }

 static int to_mthca_state(enum ib_qp_state ib_state)
@@ -833,10 +835,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	 * entries and reinitialize the QP.
 	 */
 	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
 				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

 		mthca_wq_init(&qp->sq);
@@ -1096,7 +1098,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	int ret;
 	int i;

-	atomic_set(&qp->refcount, 1);
+	qp->refcount = 1;
 	init_waitqueue_head(&qp->wait);
 	qp->state        = IB_QPS_RESET;
 	qp->atomic_rd_en = 0;
@@ -1318,6 +1320,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
 	return err;
 }

+static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
+{
+	int c;
+
+	spin_lock_irq(&dev->qp_table.lock);
+	c = qp->refcount;
+	spin_unlock_irq(&dev->qp_table.lock);
+
+	return c;
+}
+
 void mthca_free_qp(struct mthca_dev *dev,
 		   struct mthca_qp *qp)
 {
@@ -1339,14 +1352,14 @@ void mthca_free_qp(struct mthca_dev *dev,
 	spin_lock(&dev->qp_table.lock);
 	mthca_array_clear(&dev->qp_table.qp,
 			  qp->qpn & (dev->limits.num_qps - 1));
+	--qp->refcount;
 	spin_unlock(&dev->qp_table.lock);

 	if (send_cq != recv_cq)
 		spin_unlock(&recv_cq->lock);
 	spin_unlock_irq(&send_cq->lock);

-	atomic_dec(&qp->refcount);
-	wait_event(qp->wait, !atomic_read(&qp->refcount));
+	wait_event(qp->wait, !get_qp_refcount(dev, qp));

 	if (qp->state != IB_QPS_RESET)
 		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
@@ -1358,10 +1371,10 @@ void mthca_free_qp(struct mthca_dev *dev,
 	 * unref the mem-free tables and free the QPN in our table.
 	 */
 	if (!qp->ibqp.uobject) {
-		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
 				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

 		mthca_free_memfree(dev, qp);
......
@@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 		goto err_out_mailbox;

 	spin_lock_init(&srq->lock);
-	atomic_set(&srq->refcount, 1);
+	srq->refcount = 1;
 	init_waitqueue_head(&srq->wait);

 	if (mthca_is_memfree(dev))
@@ -308,6 +308,17 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 	return err;
 }

+static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+	int c;
+
+	spin_lock_irq(&dev->srq_table.lock);
+	c = srq->refcount;
+	spin_unlock_irq(&dev->srq_table.lock);
+
+	return c;
+}
+
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 {
 	struct mthca_mailbox *mailbox;
@@ -329,10 +340,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 	spin_lock_irq(&dev->srq_table.lock);
 	mthca_array_clear(&dev->srq_table.srq,
 			  srq->srqn & (dev->limits.num_srqs - 1));
+	--srq->refcount;
 	spin_unlock_irq(&dev->srq_table.lock);

-	atomic_dec(&srq->refcount);
-	wait_event(srq->wait, !atomic_read(&srq->refcount));
+	wait_event(srq->wait, !get_srq_refcount(dev, srq));

 	if (!srq->ibsrq.uobject) {
 		mthca_free_srq_buf(dev, srq);
@@ -414,7 +425,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 	spin_lock(&dev->srq_table.lock);
 	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
 	if (srq)
-		atomic_inc(&srq->refcount);
+		++srq->refcount;
 	spin_unlock(&dev->srq_table.lock);

 	if (!srq) {
@@ -431,8 +442,10 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

 out:
-	if (atomic_dec_and_test(&srq->refcount))
+	spin_lock(&dev->srq_table.lock);
+	if (!--srq->refcount)
 		wake_up(&srq->wait);
+	spin_unlock(&dev->srq_table.lock);
 }

 /*
......
@@ -158,10 +158,8 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 		if (priv->pkey == pkey) {
 			unregister_netdev(priv->dev);
 			ipoib_dev_cleanup(priv->dev);
-
 			list_del(&priv->list);
-
-			kfree(priv);
+			free_netdev(priv->dev);

 			ret = 0;
 			break;
......
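The ipoib_vlan.c hunk above drops the kfree(priv): the ipoib_dev_priv structure is the private area embedded in the net_device returned by alloc_netdev(), so the whole allocation, private data included, is released by free_netdev() on the device once it has been unregistered. An illustrative pairing of allocation and teardown (a generic sketch, not the IPoIB code; the 3-argument alloc_netdev() form matches kernels of this vintage):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct example_priv {
	int dummy;	/* driver state lives inside the net_device allocation */
};

static struct net_device *example_create(void)
{
	/* sizeof(struct example_priv) bytes are allocated together with the
	 * net_device and reached via netdev_priv(); register_netdev() would
	 * follow in real code. */
	return alloc_netdev(sizeof(struct example_priv), "ex%d", ether_setup);
}

static void example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);
	/* free_netdev() releases the device and the embedded private area in
	 * one go; a separate kfree(netdev_priv(dev)) would be a bogus free. */
	free_netdev(dev);
}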
@@ -409,6 +409,34 @@ static int srp_connect_target(struct srp_target_port *target)
 	}
 }

+static void srp_unmap_data(struct scsi_cmnd *scmnd,
+			   struct srp_target_port *target,
+			   struct srp_request *req)
+{
+	struct scatterlist *scat;
+	int nents;
+
+	if (!scmnd->request_buffer ||
+	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
+	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
+		return;
+
+	/*
+	 * This handling of non-SG commands can be killed when the
+	 * SCSI midlayer no longer generates non-SG commands.
+	 */
+	if (likely(scmnd->use_sg)) {
+		nents = scmnd->use_sg;
+		scat  = scmnd->request_buffer;
+	} else {
+		nents = 1;
+		scat  = &req->fake_sg;
+	}
+
+	dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
+		     scmnd->sc_data_direction);
+}
+
 static int srp_reconnect_target(struct srp_target_port *target)
 {
 	struct ib_cm_id *new_cm_id;
@@ -455,16 +483,16 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	list_for_each_entry(req, &target->req_queue, list) {
 		req->scmnd->result = DID_RESET << 16;
 		req->scmnd->scsi_done(req->scmnd);
+		srp_unmap_data(req->scmnd, target, req);
 	}

 	target->rx_head	 = 0;
 	target->tx_head	 = 0;
 	target->tx_tail	 = 0;
-	target->req_head = 0;
-	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
-		target->req_ring[i].next = i + 1;
-	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+	INIT_LIST_HEAD(&target->free_reqs);
 	INIT_LIST_HEAD(&target->req_queue);
+	for (i = 0; i < SRP_SQ_SIZE; ++i)
+		list_add_tail(&target->req_ring[i].list, &target->free_reqs);

 	ret = srp_connect_target(target);
 	if (ret)
@@ -589,40 +617,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	return len;
 }

-static void srp_unmap_data(struct scsi_cmnd *scmnd,
-			   struct srp_target_port *target,
-			   struct srp_request *req)
-{
-	struct scatterlist *scat;
-	int nents;
-
-	if (!scmnd->request_buffer ||
-	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
-	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
-		return;
-
-	/*
-	 * This handling of non-SG commands can be killed when the
-	 * SCSI midlayer no longer generates non-SG commands.
-	 */
-	if (likely(scmnd->use_sg)) {
-		nents = scmnd->use_sg;
-		scat  = scmnd->request_buffer;
-	} else {
-		nents = 1;
-		scat  = &req->fake_sg;
-	}
-
-	dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
-		     scmnd->sc_data_direction);
-}
-
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req,
-			   int index)
+static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
 {
-	list_del(&req->list);
-	req->next = target->req_head;
-	target->req_head = index;
+	srp_unmap_data(req->scmnd, target, req);
+	list_move_tail(&req->list, &target->free_reqs);
 }

 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -665,14 +663,11 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

-		srp_unmap_data(scmnd, target, req);
-
 		if (!req->tsk_mgmt) {
-			req->scmnd = NULL;
 			scmnd->host_scribble = (void *) -1L;
 			scmnd->scsi_done(scmnd);

-			srp_remove_req(target, req, rsp->tag & ~SRP_TAG_TSK_MGMT);
+			srp_remove_req(target, req);
 		} else
 			req->cmd_done = 1;
 	}
@@ -859,7 +854,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
-	long req_index;
 	int len;

 	if (target->state == SRP_TARGET_CONNECTING)
@@ -879,22 +873,20 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 	dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma,
 				SRP_MAX_IU_LEN, DMA_TO_DEVICE);

-	req_index = target->req_head;
+	req = list_entry(target->free_reqs.next, struct srp_request, list);

 	scmnd->scsi_done     = done;
 	scmnd->result        = 0;
-	scmnd->host_scribble = (void *) req_index;
+	scmnd->host_scribble = (void *) (long) req->index;

 	cmd = iu->buf;
 	memset(cmd, 0, sizeof *cmd);

 	cmd->opcode = SRP_CMD;
 	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
-	cmd->tag    = req_index;
+	cmd->tag    = req->index;
 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

-	req = &target->req_ring[req_index];
-
 	req->scmnd    = scmnd;
 	req->cmd      = iu;
 	req->cmd_done = 0;
@@ -919,8 +911,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 		goto err_unmap;
 	}

-	target->req_head = req->next;
-	list_add_tail(&req->list, &target->req_queue);
+	list_move_tail(&req->list, &target->req_queue);

 	return 0;
@@ -1143,30 +1134,20 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 	return 0;
 }

-static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
+static int srp_send_tsk_mgmt(struct srp_target_port *target,
+			     struct srp_request *req, u8 func)
 {
-	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
-	int req_index;
-	int ret = FAILED;

 	spin_lock_irq(target->scsi_host->host_lock);

 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED) {
-		scmnd->result = DID_BAD_TARGET << 16;
+		req->scmnd->result = DID_BAD_TARGET << 16;
 		goto out;
 	}

-	if (scmnd->host_scribble == (void *) -1L)
-		goto out;
-
-	req_index = (long) scmnd->host_scribble;
-	printk(KERN_ERR "Abort for req_index %d\n", req_index);
-
-	req = &target->req_ring[req_index];
 	init_completion(&req->done);

 	iu = __srp_get_tx_iu(target);
@@ -1177,10 +1158,10 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

 	tsk_mgmt->opcode        = SRP_TSK_MGMT;
-	tsk_mgmt->lun           = cpu_to_be64((u64) scmnd->device->lun << 48);
-	tsk_mgmt->tag           = req_index | SRP_TAG_TSK_MGMT;
+	tsk_mgmt->lun           = cpu_to_be64((u64) req->scmnd->device->lun << 48);
+	tsk_mgmt->tag           = req->index | SRP_TAG_TSK_MGMT;
 	tsk_mgmt->tsk_mgmt_func = func;
-	tsk_mgmt->task_tag      = req_index;
+	tsk_mgmt->task_tag      = req->index;

 	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
 		goto out;
@@ -1188,37 +1169,85 @@ static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func)
 	req->tsk_mgmt = iu;

 	spin_unlock_irq(target->scsi_host->host_lock);

 	if (!wait_for_completion_timeout(&req->done,
 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
-		return FAILED;
-	spin_lock_irq(target->scsi_host->host_lock);
+		return -1;

-	if (req->cmd_done) {
-		srp_remove_req(target, req, req_index);
-		scmnd->scsi_done(scmnd);
-	} else if (!req->tsk_status) {
-		srp_remove_req(target, req, req_index);
-		scmnd->result = DID_ABORT << 16;
-		ret = SUCCESS;
-	}
+	return 0;

 out:
 	spin_unlock_irq(target->scsi_host->host_lock);
-	return ret;
+	return -1;
+}
+
+static int srp_find_req(struct srp_target_port *target,
+			struct scsi_cmnd *scmnd,
+			struct srp_request **req)
+{
+	if (scmnd->host_scribble == (void *) -1L)
+		return -1;
+
+	*req = &target->req_ring[(long) scmnd->host_scribble];
+
+	return 0;
 }

 static int srp_abort(struct scsi_cmnd *scmnd)
 {
+	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_request *req;
+	int ret = SUCCESS;
+
 	printk(KERN_ERR "SRP abort called\n");

-	return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK);
+	if (srp_find_req(target, scmnd, &req))
+		return FAILED;
+	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
+		return FAILED;
+
+	spin_lock_irq(target->scsi_host->host_lock);
+
+	if (req->cmd_done) {
+		srp_remove_req(target, req);
+		scmnd->scsi_done(scmnd);
+	} else if (!req->tsk_status) {
+		srp_remove_req(target, req);
+		scmnd->result = DID_ABORT << 16;
+	} else
+		ret = FAILED;
+
+	spin_unlock_irq(target->scsi_host->host_lock);
+
+	return ret;
 }

 static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
+	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_request *req, *tmp;
+
 	printk(KERN_ERR "SRP reset_device called\n");

-	return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET);
+	if (srp_find_req(target, scmnd, &req))
+		return FAILED;
+	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+		return FAILED;
+	if (req->tsk_status)
+		return FAILED;
+
+	spin_lock_irq(target->scsi_host->host_lock);
+
+	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
+		if (req->scmnd->device == scmnd->device) {
+			req->scmnd->result = DID_RESET << 16;
+			scmnd->scsi_done(scmnd);
+			srp_remove_req(target, req);
+		}
+
+	spin_unlock_irq(target->scsi_host->host_lock);
+
+	return SUCCESS;
 }

 static int srp_reset_host(struct scsi_cmnd *scmnd)
@@ -1518,10 +1547,12 @@ static ssize_t srp_create_target(struct class_device *class_dev,
 	INIT_WORK(&target->work, srp_reconnect_work, target);

-	for (i = 0; i < SRP_SQ_SIZE - 1; ++i)
-		target->req_ring[i].next = i + 1;
-	target->req_ring[SRP_SQ_SIZE - 1].next = -1;
+	INIT_LIST_HEAD(&target->free_reqs);
 	INIT_LIST_HEAD(&target->req_queue);
+	for (i = 0; i < SRP_SQ_SIZE; ++i) {
+		target->req_ring[i].index = i;
+		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
+	}

 	ret = srp_parse_options(buf, target);
 	if (ret)
......
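The ib_srp.c changes above replace the hand-rolled index chain (req_head plus a per-request next field) with two standard lists: free_reqs holds idle request slots and req_queue holds commands posted to the target, with list_move_tail() shifting a slot between them; each slot keeps a fixed index that is still used to build the SRP tag. A compact sketch of this free-list pattern using the kernel list API (all names hypothetical, not the driver's):

#include <linux/list.h>

struct sketch_req {
	struct list_head list;
	short		 index;		/* fixed slot number, usable as a wire tag */
};

struct sketch_target {
	struct list_head free_reqs;	/* idle request slots */
	struct list_head req_queue;	/* requests currently in flight */
	struct sketch_req req_ring[16];
};

static void sketch_init(struct sketch_target *t)
{
	int i;

	INIT_LIST_HEAD(&t->free_reqs);
	INIT_LIST_HEAD(&t->req_queue);
	for (i = 0; i < 16; ++i) {
		t->req_ring[i].index = i;
		list_add_tail(&t->req_ring[i].list, &t->free_reqs);
	}
}

static struct sketch_req *sketch_get(struct sketch_target *t)
{
	struct sketch_req *req;

	if (list_empty(&t->free_reqs))
		return NULL;
	req = list_entry(t->free_reqs.next, struct sketch_req, list);
	list_move_tail(&req->list, &t->req_queue);	/* now in flight */
	return req;
}

static void sketch_put(struct sketch_target *t, struct sketch_req *req)
{
	list_move_tail(&req->list, &t->free_reqs);	/* back on the free list */
}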
@@ -101,7 +101,7 @@ struct srp_request {
 	 */
 	struct scatterlist	fake_sg;
 	struct completion	done;
-	short			next;
+	short			index;
 	u8			cmd_done;
 	u8			tsk_status;
 };
@@ -133,7 +133,7 @@ struct srp_target_port {
 	unsigned		tx_tail;
 	struct srp_iu	       *tx_ring[SRP_SQ_SIZE + 1];

-	int			req_head;
+	struct list_head	free_reqs;
 	struct list_head	req_queue;

 	struct srp_request	req_ring[SRP_SQ_SIZE];
......