Commit ffc26907 authored by Dennis Dalessandro, committed by Doug Ledford

IB/qib: Remove driver specific members from qib qp type

In preparation for moving the queue pair data structure to rdmavt, the
members of the driver-specific queue pair which are not common need to be
pushed off to a private driver structure. Once the queue pair moves to
rdmavt, that structure will be reachable from it as a void pointer. While
this patch does not add much value in and of itself, it is a prerequisite
for moving the queue pair out of the drivers and into rdmavt.

The driver-specific, private queue pair data structure should shrink as
more of the send-side code moves to rdmavt.
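
The shape of the change is the standard private-data idiom: the
soon-to-be-common queue pair keeps only a void *priv, the driver allocates
its own structure behind that pointer, and an owner back-pointer lets
deferred work climb from an embedded member back to the queue pair (the
qib_do_send() hunk below shows the real thing). A minimal, self-contained
sketch follows; all names in it (common_qp, drv_qp_priv, drv_*) are
illustrative stand-ins, not the real qib/rdmavt definitions.

	#include <stddef.h>
	#include <stdlib.h>

	/* container_of as in the kernel, reduced to plain C */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct common_qp {
		void *priv;		/* opaque driver-private state */
	};

	struct drv_work {		/* stand-in for struct work_struct */
		void (*func)(struct drv_work *w);
	};

	struct drv_qp_priv {
		struct drv_work s_work;		/* embedded work item */
		struct common_qp *owner;	/* back-pointer to the QP */
	};

	/*
	 * Deferred send handler: recover the private struct from the
	 * embedded work item, then the common QP through the owner
	 * back-pointer -- the shape qib_do_send() takes in this patch.
	 */
	static void drv_do_send(struct drv_work *w)
	{
		struct drv_qp_priv *priv =
			container_of(w, struct drv_qp_priv, s_work);
		struct common_qp *qp = priv->owner;

		(void)qp;	/* ... build and post the packet ... */
	}

	static int drv_qp_init(struct common_qp *qp)
	{
		struct drv_qp_priv *priv = calloc(1, sizeof(*priv));

		if (!priv)
			return -1;
		priv->owner = qp;
		priv->s_work.func = drv_do_send;
		qp->priv = priv;	/* common code sees only void * */
		return 0;
	}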
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 869a2a96
@@ -371,10 +371,11 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
  */
 static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	qp->remote_qpn = 0;
 	qp->qkey = 0;
 	qp->qp_access_flags = 0;
-	atomic_set(&qp->s_dma_busy, 0);
+	atomic_set(&priv->s_dma_busy, 0);
 	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
 	qp->s_wqe = NULL;
@@ -474,6 +475,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
  */
 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
 	struct ib_wc wc;
 	int ret = 0;
@@ -492,9 +494,9 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
 	qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
 	spin_lock(&dev->pending_lock);
-	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
+	if (!list_empty(&priv->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
 		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
-		list_del_init(&qp->iowait);
+		list_del_init(&priv->iowait);
 	}
 	spin_unlock(&dev->pending_lock);
@@ -504,9 +506,9 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
 		qib_put_mr(qp->s_rdma_mr);
 		qp->s_rdma_mr = NULL;
 	}
-	if (qp->s_tx) {
-		qib_put_txreq(qp->s_tx);
-		qp->s_tx = NULL;
+	if (priv->s_tx) {
+		qib_put_txreq(priv->s_tx);
+		priv->s_tx = NULL;
 	}
 }
@@ -572,6 +574,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 {
 	struct qib_ibdev *dev = to_idev(ibqp->device);
 	struct qib_qp *qp = to_iqp(ibqp);
+	struct qib_qp_priv *priv = qp->priv;
 	enum ib_qp_state cur_state, new_state;
 	struct ib_event ev;
 	int lastwqe = 0;
@@ -699,19 +702,20 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		if (qp->state != IB_QPS_RESET) {
 			qp->state = IB_QPS_RESET;
 			spin_lock(&dev->pending_lock);
-			if (!list_empty(&qp->iowait))
-				list_del_init(&qp->iowait);
+			if (!list_empty(&priv->iowait))
+				list_del_init(&priv->iowait);
 			spin_unlock(&dev->pending_lock);
 			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
 			spin_unlock(&qp->s_lock);
 			spin_unlock_irq(&qp->r_lock);
 			/* Stop the sending work queue and retry timer */
-			cancel_work_sync(&qp->s_work);
+			cancel_work_sync(&priv->s_work);
 			del_timer_sync(&qp->s_timer);
-			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
-			if (qp->s_tx) {
-				qib_put_txreq(qp->s_tx);
-				qp->s_tx = NULL;
+			wait_event(priv->wait_dma,
+				   !atomic_read(&priv->s_dma_busy));
+			if (priv->s_tx) {
+				qib_put_txreq(priv->s_tx);
+				priv->s_tx = NULL;
 			}
 			remove_qp(dev, qp);
 			wait_event(qp->wait, !atomic_read(&qp->refcount));
@@ -987,7 +991,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
 	size_t sg_list_sz;
 	struct ib_qp *ret;
 	gfp_t gfp;
+	struct qib_qp_priv *priv;

 	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
 	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
@@ -1055,11 +1059,18 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
 			goto bail_swq;
 		}
 		RCU_INIT_POINTER(qp->next, NULL);
-		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
-		if (!qp->s_hdr) {
+		priv = kzalloc(sizeof(*priv), gfp);
+		if (!priv) {
+			ret = ERR_PTR(-ENOMEM);
+			goto bail_qp_hdr;
+		}
+		priv->owner = qp;
+		priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+		if (!priv->s_hdr) {
 			ret = ERR_PTR(-ENOMEM);
 			goto bail_qp;
 		}
+		qp->priv = priv;
 		qp->timeout_jiffies =
 			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
 				1000UL);
@@ -1095,11 +1106,11 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
 		spin_lock_init(&qp->r_rq.lock);
 		atomic_set(&qp->refcount, 0);
 		init_waitqueue_head(&qp->wait);
-		init_waitqueue_head(&qp->wait_dma);
+		init_waitqueue_head(&priv->wait_dma);
 		init_timer(&qp->s_timer);
 		qp->s_timer.data = (unsigned long)qp;
-		INIT_WORK(&qp->s_work, qib_do_send);
-		INIT_LIST_HEAD(&qp->iowait);
+		INIT_WORK(&priv->s_work, qib_do_send);
+		INIT_LIST_HEAD(&priv->iowait);
 		INIT_LIST_HEAD(&qp->rspwait);
 		qp->state = IB_QPS_RESET;
 		qp->s_wq = swq;
@@ -1189,7 +1200,9 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
 		vfree(qp->r_rq.wq);
 	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
 bail_qp:
-	kfree(qp->s_hdr);
+	kfree(priv->s_hdr);
+	kfree(priv);
+bail_qp_hdr:
 	kfree(qp);
 bail_swq:
 	vfree(swq);
@@ -1210,23 +1223,24 @@ int qib_destroy_qp(struct ib_qp *ibqp)
 {
 	struct qib_qp *qp = to_iqp(ibqp);
 	struct qib_ibdev *dev = to_idev(ibqp->device);
+	struct qib_qp_priv *priv = qp->priv;

 	/* Make sure HW and driver activity is stopped. */
 	spin_lock_irq(&qp->s_lock);
 	if (qp->state != IB_QPS_RESET) {
 		qp->state = IB_QPS_RESET;
 		spin_lock(&dev->pending_lock);
-		if (!list_empty(&qp->iowait))
-			list_del_init(&qp->iowait);
+		if (!list_empty(&priv->iowait))
+			list_del_init(&priv->iowait);
 		spin_unlock(&dev->pending_lock);
 		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
 		spin_unlock_irq(&qp->s_lock);
-		cancel_work_sync(&qp->s_work);
+		cancel_work_sync(&priv->s_work);
 		del_timer_sync(&qp->s_timer);
-		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
-		if (qp->s_tx) {
-			qib_put_txreq(qp->s_tx);
-			qp->s_tx = NULL;
+		wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
+		if (priv->s_tx) {
+			qib_put_txreq(priv->s_tx);
+			priv->s_tx = NULL;
 		}
 		remove_qp(dev, qp);
 		wait_event(qp->wait, !atomic_read(&qp->refcount));
@@ -1245,7 +1259,8 @@ int qib_destroy_qp(struct ib_qp *ibqp)
 	else
 		vfree(qp->r_rq.wq);
 	vfree(qp->s_wq);
-	kfree(qp->s_hdr);
+	kfree(priv->s_hdr);
+	kfree(priv);
 	kfree(qp);
 	return 0;
 }
@@ -1368,6 +1383,7 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
 {
 	struct qib_swqe *wqe;
 	struct qib_qp *qp = iter->qp;
+	struct qib_qp_priv *priv = qp->priv;

 	wqe = get_swqe_ptr(qp, qp->s_last);
 	seq_printf(s,
@@ -1379,8 +1395,8 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
 		   wqe->wr.opcode,
 		   qp->s_hdrwords,
 		   qp->s_flags,
-		   atomic_read(&qp->s_dma_busy),
-		   !list_empty(&qp->iowait),
+		   atomic_read(&priv->s_dma_busy),
+		   !list_empty(&priv->iowait),
 		   qp->timeout,
 		   wqe->ssn,
 		   qp->s_lsn,
...
@@ -230,6 +230,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
  */
 int qib_make_rc_req(struct qib_qp *qp)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
 	struct qib_other_headers *ohdr;
 	struct qib_sge_state *ss;
@@ -244,9 +245,9 @@ int qib_make_rc_req(struct qib_qp *qp)
 	int ret = 0;
 	int delta;

-	ohdr = &qp->s_hdr->u.oth;
+	ohdr = &priv->s_hdr->u.oth;
 	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-		ohdr = &qp->s_hdr->u.l.oth;
+		ohdr = &priv->s_hdr->u.l.oth;

 	/*
 	 * The lock is needed to synchronize between the sending tasklet,
@@ -266,7 +267,7 @@ int qib_make_rc_req(struct qib_qp *qp)
 		if (qp->s_last == qp->s_head)
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
-		if (atomic_read(&qp->s_dma_busy)) {
+		if (atomic_read(&priv->s_dma_busy)) {
 			qp->s_flags |= QIB_S_WAIT_DMA;
 			goto bail;
 		}
...
@@ -675,6 +675,7 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
 void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
 			 u32 bth0, u32 bth2)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
 	u16 lrh0;
 	u32 nwords;
@@ -685,17 +686,18 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
 	nwords = (qp->s_cur_size + extra_bytes) >> 2;
 	lrh0 = QIB_LRH_BTH;
 	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
+		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
 					       &qp->remote_ah_attr.grh,
 					       qp->s_hdrwords, nwords);
 		lrh0 = QIB_LRH_GRH;
 	}
 	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
 		qp->remote_ah_attr.sl << 4;
-	qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
-	qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-	qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
-	qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
+	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
+	priv->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+	priv->s_hdr->lrh[2] =
+			cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+	priv->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
 				       qp->remote_ah_attr.src_path_bits);
 	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
 	bth0 |= extra_bytes << 20;
@@ -717,7 +719,9 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
  */
 void qib_do_send(struct work_struct *work)
 {
-	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
+	struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
+						s_work);
+	struct qib_qp *qp = priv->owner;
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	int (*make_req)(struct qib_qp *qp);
@@ -756,7 +760,7 @@ void qib_do_send(struct work_struct *work)
 		 * If the packet cannot be sent now, return and
 		 * the send tasklet will be woken up later.
 		 */
-		if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
+		if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
 				   qp->s_cur_sge, qp->s_cur_size))
 			break;
 		/* Record that s_hdr is empty. */
...
@@ -513,7 +513,9 @@ int qib_sdma_running(struct qib_pportdata *ppd)
 static void complete_sdma_err_req(struct qib_pportdata *ppd,
 				  struct qib_verbs_txreq *tx)
 {
-	atomic_inc(&tx->qp->s_dma_busy);
+	struct qib_qp_priv *priv = tx->qp->priv;
+
+	atomic_inc(&priv->s_dma_busy);
 	/* no sdma descriptors, so no unmap_desc */
 	tx->txreq.start_idx = 0;
 	tx->txreq.next_descq_idx = 0;
@@ -543,6 +545,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
 	u64 sdmadesc[2];
 	u32 dwoffset;
 	dma_addr_t addr;
+	struct qib_qp_priv *priv;

 	spin_lock_irqsave(&ppd->sdma_lock, flags);
@@ -644,8 +647,8 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
 		descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
 	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
 		descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
-
-	atomic_inc(&tx->qp->s_dma_busy);
+	priv = tx->qp->priv;
+	atomic_inc(&priv->s_dma_busy);
 	tx->txreq.next_descq_idx = tail;
 	ppd->dd->f_sdma_update_tail(ppd, tail);
 	ppd->sdma_descq_added += tx->txreq.sg_count;
@@ -663,6 +666,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
 			unmap_desc(ppd, tail);
 		}
 		qp = tx->qp;
+		priv = qp->priv;
 		qib_put_txreq(tx);
 		spin_lock(&qp->r_lock);
 		spin_lock(&qp->s_lock);
@@ -679,6 +683,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
 busy:
 	qp = tx->qp;
+	priv = qp->priv;
 	spin_lock(&qp->s_lock);
 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
 		struct qib_ibdev *dev;
@@ -690,16 +695,16 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
 		 */
 		tx->ss = ss;
 		tx->dwords = dwords;
-		qp->s_tx = tx;
+		priv->s_tx = tx;
 		dev = &ppd->dd->verbs_dev;
 		spin_lock(&dev->pending_lock);
-		if (list_empty(&qp->iowait)) {
+		if (list_empty(&priv->iowait)) {
 			struct qib_ibport *ibp;

 			ibp = &ppd->ibport_data;
 			ibp->n_dmawait++;
 			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
-			list_add_tail(&qp->iowait, &dev->dmawait);
+			list_add_tail(&priv->iowait, &dev->dmawait);
 		}
 		spin_unlock(&dev->pending_lock);
 		qp->s_flags &= ~QIB_S_BUSY;
...
@@ -45,6 +45,7 @@
  */
 int qib_make_uc_req(struct qib_qp *qp)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	struct qib_other_headers *ohdr;
 	struct qib_swqe *wqe;
 	unsigned long flags;
@@ -63,7 +64,7 @@ int qib_make_uc_req(struct qib_qp *qp)
 		if (qp->s_last == qp->s_head)
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
-		if (atomic_read(&qp->s_dma_busy)) {
+		if (atomic_read(&priv->s_dma_busy)) {
 			qp->s_flags |= QIB_S_WAIT_DMA;
 			goto bail;
 		}
@@ -72,9 +73,9 @@ int qib_make_uc_req(struct qib_qp *qp)
 		goto done;
 	}

-	ohdr = &qp->s_hdr->u.oth;
+	ohdr = &priv->s_hdr->u.oth;
 	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-		ohdr = &qp->s_hdr->u.l.oth;
+		ohdr = &priv->s_hdr->u.l.oth;

 	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
 	hwords = 5;
...
@@ -235,6 +235,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
  */
 int qib_make_ud_req(struct qib_qp *qp)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	struct qib_other_headers *ohdr;
 	struct ib_ah_attr *ah_attr;
 	struct qib_pportdata *ppd;
@@ -258,7 +259,7 @@ int qib_make_ud_req(struct qib_qp *qp)
 		if (qp->s_last == qp->s_head)
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
-		if (atomic_read(&qp->s_dma_busy)) {
+		if (atomic_read(&priv->s_dma_busy)) {
 			qp->s_flags |= QIB_S_WAIT_DMA;
 			goto bail;
 		}
@@ -295,7 +296,7 @@ int qib_make_ud_req(struct qib_qp *qp)
 			 * XXX Instead of waiting, we could queue a
 			 * zero length descriptor so we get a callback.
 			 */
-			if (atomic_read(&qp->s_dma_busy)) {
+			if (atomic_read(&priv->s_dma_busy)) {
 				qp->s_flags |= QIB_S_WAIT_DMA;
 				goto bail;
 			}
@@ -325,11 +326,11 @@ int qib_make_ud_req(struct qib_qp *qp)
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		/* Header size in 32-bit words. */
-		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
+		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
 					       &ah_attr->grh,
 					       qp->s_hdrwords, nwords);
 		lrh0 = QIB_LRH_GRH;
-		ohdr = &qp->s_hdr->u.l.oth;
+		ohdr = &priv->s_hdr->u.l.oth;
 		/*
 		 * Don't worry about sending to locally attached multicast
 		 * QPs.  It is unspecified by the spec. what happens.
@@ -337,7 +338,7 @@ int qib_make_ud_req(struct qib_qp *qp)
 	} else {
 		/* Header size in 32-bit words. */
 		lrh0 = QIB_LRH_BTH;
-		ohdr = &qp->s_hdr->u.oth;
+		ohdr = &priv->s_hdr->u.oth;
 	}
 	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
 		qp->s_hdrwords++;
@@ -350,15 +351,16 @@ int qib_make_ud_req(struct qib_qp *qp)
 		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
 	else
 		lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
-	qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
-	qp->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
-	qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
+	priv->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
+	priv->s_hdr->lrh[2] =
+			cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
 	lid = ppd->lid;
 	if (lid) {
 		lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
-		qp->s_hdr->lrh[3] = cpu_to_be16(lid);
+		priv->s_hdr->lrh[3] = cpu_to_be16(lid);
 	} else
-		qp->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
+		priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
 	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
 		bth0 |= IB_BTH_SOLICITED;
 	bth0 |= extra_bytes << 20;
...
@@ -486,6 +486,7 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			 struct ib_send_wr **bad_wr)
 {
 	struct qib_qp *qp = to_iqp(ibqp);
+	struct qib_qp_priv *priv = qp->priv;
 	int err = 0;
 	int scheduled = 0;
@@ -499,7 +500,7 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	/* Try to do the send work in the caller's context. */
 	if (!scheduled)
-		qib_do_send(&qp->s_work);
+		qib_do_send(&priv->s_work);

 bail:
 	return err;
@@ -730,12 +731,14 @@ static void mem_timer(unsigned long data)
 	struct qib_ibdev *dev = (struct qib_ibdev *) data;
 	struct list_head *list = &dev->memwait;
 	struct qib_qp *qp = NULL;
+	struct qib_qp_priv *priv = NULL;
 	unsigned long flags;

 	spin_lock_irqsave(&dev->pending_lock, flags);
 	if (!list_empty(list)) {
-		qp = list_entry(list->next, struct qib_qp, iowait);
-		list_del_init(&qp->iowait);
+		priv = list_entry(list->next, struct qib_qp_priv, iowait);
+		qp = priv->owner;
+		list_del_init(&priv->iowait);
 		atomic_inc(&qp->refcount);
 		if (!list_empty(list))
 			mod_timer(&dev->mem_timer, jiffies + 1);
@@ -950,6 +953,7 @@ static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
 static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
 					   struct qib_qp *qp)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	struct qib_verbs_txreq *tx;
 	unsigned long flags;
@@ -965,10 +969,10 @@ static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
 	} else {
 		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
-		    list_empty(&qp->iowait)) {
+		    list_empty(&priv->iowait)) {
 			dev->n_txwait++;
 			qp->s_flags |= QIB_S_WAIT_TX;
-			list_add_tail(&qp->iowait, &dev->txwait);
+			list_add_tail(&priv->iowait, &dev->txwait);
 		}
 		qp->s_flags &= ~QIB_S_BUSY;
 		spin_unlock(&dev->pending_lock);
@@ -1004,6 +1008,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
 {
 	struct qib_ibdev *dev;
 	struct qib_qp *qp;
+	struct qib_qp_priv *priv;
 	unsigned long flags;

 	qp = tx->qp;
@@ -1030,8 +1035,10 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
 	if (!list_empty(&dev->txwait)) {
 		/* Wake up first QP wanting a free struct */
-		qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
-		list_del_init(&qp->iowait);
+		priv = list_entry(dev->txwait.next, struct qib_qp_priv,
+				  iowait);
+		qp = priv->owner;
+		list_del_init(&priv->iowait);
 		atomic_inc(&qp->refcount);
 		spin_unlock_irqrestore(&dev->pending_lock, flags);
@@ -1057,6 +1064,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
 {
 	struct qib_qp *qp, *nqp;
+	struct qib_qp_priv *qpp, *nqpp;
 	struct qib_qp *qps[20];
 	struct qib_ibdev *dev;
 	unsigned i, n;
@@ -1066,15 +1074,17 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
 	spin_lock(&dev->pending_lock);

 	/* Search wait list for first QP wanting DMA descriptors. */
-	list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
+	list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
+		qp = qpp->owner;
+		nqp = nqpp->owner;
 		if (qp->port_num != ppd->port)
 			continue;
 		if (n == ARRAY_SIZE(qps))
 			break;
-		if (qp->s_tx->txreq.sg_count > avail)
+		if (qpp->s_tx->txreq.sg_count > avail)
 			break;
-		avail -= qp->s_tx->txreq.sg_count;
-		list_del_init(&qp->iowait);
+		avail -= qpp->s_tx->txreq.sg_count;
+		list_del_init(&qpp->iowait);
 		atomic_inc(&qp->refcount);
 		qps[n++] = qp;
 	}
@@ -1102,6 +1112,7 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
 	struct qib_verbs_txreq *tx =
 		container_of(cookie, struct qib_verbs_txreq, txreq);
 	struct qib_qp *qp = tx->qp;
+	struct qib_qp_priv *priv = qp->priv;

 	spin_lock(&qp->s_lock);
 	if (tx->wqe)
@@ -1118,9 +1129,9 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
 		}
 		qib_rc_send_complete(qp, hdr);
 	}
-	if (atomic_dec_and_test(&qp->s_dma_busy)) {
+	if (atomic_dec_and_test(&priv->s_dma_busy)) {
 		if (qp->state == IB_QPS_RESET)
-			wake_up(&qp->wait_dma);
+			wake_up(&priv->wait_dma);
 		else if (qp->s_flags & QIB_S_WAIT_DMA) {
 			qp->s_flags &= ~QIB_S_WAIT_DMA;
 			qib_schedule_send(qp);
@@ -1133,17 +1144,18 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
 static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	unsigned long flags;
 	int ret = 0;

 	spin_lock_irqsave(&qp->s_lock, flags);
 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
 		spin_lock(&dev->pending_lock);
-		if (list_empty(&qp->iowait)) {
+		if (list_empty(&priv->iowait)) {
 			if (list_empty(&dev->memwait))
 				mod_timer(&dev->mem_timer, jiffies + 1);
 			qp->s_flags |= QIB_S_WAIT_KMEM;
-			list_add_tail(&qp->iowait, &dev->memwait);
+			list_add_tail(&priv->iowait, &dev->memwait);
 		}
 		spin_unlock(&dev->pending_lock);
 		qp->s_flags &= ~QIB_S_BUSY;
@@ -1158,6 +1170,7 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
 			   u32 hdrwords, struct qib_sge_state *ss, u32 len,
 			   u32 plen, u32 dwords)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
 	struct qib_devdata *dd = dd_from_dev(dev);
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
@@ -1168,9 +1181,9 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
 	u32 ndesc;
 	int ret;

-	tx = qp->s_tx;
+	tx = priv->s_tx;
 	if (tx) {
-		qp->s_tx = NULL;
+		priv->s_tx = NULL;
 		/* resend previously constructed packet */
 		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
 		goto bail;
@@ -1260,6 +1273,7 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
  */
 static int no_bufs_available(struct qib_qp *qp)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
 	struct qib_devdata *dd;
 	unsigned long flags;
@@ -1274,10 +1288,10 @@ static int no_bufs_available(struct qib_qp *qp)
 	spin_lock_irqsave(&qp->s_lock, flags);
 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
 		spin_lock(&dev->pending_lock);
-		if (list_empty(&qp->iowait)) {
+		if (list_empty(&priv->iowait)) {
 			dev->n_piowait++;
 			qp->s_flags |= QIB_S_WAIT_PIO;
-			list_add_tail(&qp->iowait, &dev->piowait);
+			list_add_tail(&priv->iowait, &dev->piowait);
 			dd = dd_from_dev(dev);
 			dd->f_wantpiobuf_intr(dd, 1);
 		}
@@ -1534,6 +1548,7 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
 	struct qib_qp *qp;
 	unsigned long flags;
 	unsigned i, n;
+	struct qib_qp_priv *priv;

 	list = &dev->piowait;
 	n = 0;
@@ -1548,8 +1563,9 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
 	while (!list_empty(list)) {
 		if (n == ARRAY_SIZE(qps))
 			goto full;
-		qp = list_entry(list->next, struct qib_qp, iowait);
-		list_del_init(&qp->iowait);
+		priv = list_entry(list->next, struct qib_qp_priv, iowait);
+		qp = priv->owner;
+		list_del_init(&priv->iowait);
 		atomic_inc(&qp->refcount);
 		qps[n++] = qp;
 	}
@@ -2330,11 +2346,12 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
  */
 void qib_schedule_send(struct qib_qp *qp)
 {
+	struct qib_qp_priv *priv = qp->priv;
 	if (qib_send_ok(qp)) {
 		struct qib_ibport *ibp =
 			to_iport(qp->ibqp.device, qp->port_num);
 		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

-		queue_work(ppd->qib_wq, &qp->s_work);
+		queue_work(ppd->qib_wq, &priv->s_work);
 	}
 }
...
@@ -412,6 +412,21 @@ struct qib_ack_entry {
 	};
 };

+/*
+ * qib specific data structure that will be hidden from rvt after the queue pair
+ * is made common.
+ */
+struct qib_qp;
+struct qib_qp_priv {
+	struct qib_ib_header *s_hdr;	/* next packet header to send */
+	struct list_head iowait;	/* link for wait PIO buf */
+	atomic_t s_dma_busy;
+	struct qib_verbs_txreq *s_tx;
+	struct work_struct s_work;
+	wait_queue_head_t wait_dma;
+	struct qib_qp *owner;
+};
+
 /*
  * Variables prefixed with s_ are for the requester (sender).
  * Variables prefixed with r_ are for the responder (receiver).
@@ -422,13 +437,13 @@ struct qib_ack_entry {
  */
 struct qib_qp {
 	struct ib_qp ibqp;
+	struct qib_qp_priv *priv;
 	/* read mostly fields above and below */
 	struct ib_ah_attr remote_ah_attr;
 	struct ib_ah_attr alt_ah_attr;
 	struct qib_qp __rcu *next;      /* link list for QPN hash table */
 	struct qib_swqe *s_wq;  /* send work queue */
 	struct qib_mmap_info *ip;
-	struct qib_ib_header *s_hdr;     /* next packet header to send */
 	unsigned long timeout_jiffies;  /* computed from timeout */
 	enum ib_mtu path_mtu;
@@ -486,11 +501,11 @@ struct qib_qp {
 	spinlock_t s_lock ____cacheline_aligned_in_smp;
 	struct qib_sge_state *s_cur_sge;
 	u32 s_flags;
-	struct qib_verbs_txreq *s_tx;
 	struct qib_swqe *s_wqe;
 	struct qib_sge_state s_sge;     /* current send request data */
 	struct qib_mregion *s_rdma_mr;
-	atomic_t s_dma_busy;
 	u32 s_cur_size;         /* size of send packet in bytes */
 	u32 s_len;              /* total length of s_sge */
 	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
@@ -521,11 +536,6 @@ struct qib_qp {
 	struct qib_sge_state s_ack_rdma_sge;
 	struct timer_list s_timer;
-	struct list_head iowait;        /* link for wait PIO buf */
-	struct work_struct s_work;
-
-	wait_queue_head_t wait_dma;
-
 	struct qib_sge r_sg_list[0]     /* verified SGEs */
 		____cacheline_aligned_in_smp;