Commit 01ba79d4 authored by Harish Chegondi, committed by Doug Ledford

IB/qib: Use rdmavt send and receive flags

Use the s_flags and r_flags definitions that now live in rdmavt instead of the driver-local QIB_* copies, which this patch removes from qib_verbs.h.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 0aeddea2
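[Editor's note: the change is a mechanical rename. The last file in the diff deletes the QIB_* flag definitions from qib_verbs.h, and every user switches to the RVT_* definitions exported by rdmavt (include/rdma/rdmavt_qp.h). For orientation, a trimmed, illustrative excerpt of what that header provides — the names come straight from the diff below, and the values are assumed to mirror the removed qib ones one-for-one, as a pure rename implies:

	/* include/rdma/rdmavt_qp.h (illustrative excerpt; values assumed) */
	#define RVT_R_WRID_VALID        0        /* atomic bit numbers in r_aflags */
	#define RVT_R_REWIND_SGE        1
	#define RVT_R_REUSE_SGE 0x01             /* r_flags masks */
	#define RVT_R_RDMAR_SEQ 0x02
	#define RVT_R_RSP_NAK   0x04
	#define RVT_R_RSP_SEND  0x08
	#define RVT_R_COMM_EST  0x10
	#define RVT_S_SIGNAL_REQ_WR 0x0001       /* s_flags masks */
	#define RVT_S_BUSY          0x0002
	#define RVT_S_TIMER         0x0004
	/* ... plus the RVT_S_WAIT_* bits and the composite
	 * RVT_S_ANY_WAIT_IO / RVT_S_ANY_WAIT_SEND / RVT_S_ANY_WAIT masks,
	 * matching the removed QIB_S_* block shown in qib_verbs.h below. */
]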
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -414,7 +414,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
 					 */
 					if (list_empty(&qp->rspwait)) {
 						qp->r_flags |=
-							QIB_R_RSP_NAK;
+							RVT_R_RSP_NAK;
 						atomic_inc(
 							&qp->refcount);
 						list_add_tail(
@@ -583,14 +583,14 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 	 */
 	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
 		list_del_init(&qp->rspwait);
-		if (qp->r_flags & QIB_R_RSP_NAK) {
-			qp->r_flags &= ~QIB_R_RSP_NAK;
+		if (qp->r_flags & RVT_R_RSP_NAK) {
+			qp->r_flags &= ~RVT_R_RSP_NAK;
 			qib_send_rc_ack(qp);
 		}
-		if (qp->r_flags & QIB_R_RSP_SEND) {
+		if (qp->r_flags & RVT_R_RSP_SEND) {
 			unsigned long flags;

-			qp->r_flags &= ~QIB_R_RSP_SEND;
+			qp->r_flags &= ~RVT_R_RSP_SEND;
 			spin_lock_irqsave(&qp->s_lock, flags);
 			if (ib_qib_state_ops[qp->state] &
 			    QIB_PROCESS_OR_FLUSH_SEND)
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -386,7 +386,7 @@ static void qib_reset_qp(struct rvt_qp *qp, enum ib_qp_type type)
 	qp->qkey = 0;
 	qp->qp_access_flags = 0;
 	atomic_set(&priv->s_dma_busy, 0);
-	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
+	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
 	qp->s_hdrwords = 0;
 	qp->s_wqe = NULL;
 	qp->s_draining = 0;
@@ -431,7 +431,7 @@ static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
 {
 	unsigned n;

-	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
 		qib_put_ss(&qp->s_rdma_read_sge);

 	qib_put_ss(&qp->r_sge);
@@ -496,22 +496,22 @@ int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
 	qp->state = IB_QPS_ERR;

-	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
-		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
 		del_timer(&qp->s_timer);
 	}

-	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
-		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
+	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
+		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

 	spin_lock(&dev->rdi.pending_lock);
-	if (!list_empty(&priv->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
-		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
+	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
+		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
 		list_del_init(&priv->iowait);
 	}
 	spin_unlock(&dev->rdi.pending_lock);

-	if (!(qp->s_flags & QIB_S_BUSY)) {
+	if (!(qp->s_flags & RVT_S_BUSY)) {
 		qp->s_hdrwords = 0;
 		if (qp->s_rdma_mr) {
 			rvt_put_mr(qp->s_rdma_mr);
@@ -533,7 +533,7 @@ int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
 	wc.qp = &qp->ibqp;
 	wc.opcode = IB_WC_RECV;

-	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
+	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
 		wc.wr_id = qp->r_wr_id;
 		wc.status = err;
 		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
@@ -716,7 +716,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			if (!list_empty(&priv->iowait))
 				list_del_init(&priv->iowait);
 			spin_unlock(&dev->rdi.pending_lock);
-			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
+			qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
 			spin_unlock(&qp->s_lock);
 			spin_unlock_irq(&qp->r_lock);
 			/* Stop the sending work queue and retry timer */
@@ -739,7 +739,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	case IB_QPS_RTR:
 		/* Allow event to retrigger if QP set to RTR more than once */
-		qp->r_flags &= ~QIB_R_COMM_EST;
+		qp->r_flags &= ~RVT_R_COMM_EST;
 		qp->state = new_state;
 		break;
@@ -910,7 +910,7 @@ int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	init_attr->recv_cq = qp->ibqp.recv_cq;
 	init_attr->srq = qp->ibqp.srq;
 	init_attr->cap = attr->cap;
-	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
+	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
 		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
 	else
 		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
@@ -1128,7 +1128,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
 		qp->s_size = init_attr->cap.max_send_wr + 1;
 		qp->s_max_sge = init_attr->cap.max_send_sge;
 		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
-			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
+			qp->s_flags = RVT_S_SIGNAL_REQ_WR;
 		dev = to_idev(ibpd->device);
 		dd = dd_from_dev(dev);
 		err = alloc_qpn(dd, &dev->rdi.qp_dev->qpn_table,
@@ -1244,7 +1244,7 @@ int qib_destroy_qp(struct ib_qp *ibqp)
 		if (!list_empty(&priv->iowait))
 			list_del_init(&priv->iowait);
 		spin_unlock(&dev->rdi.pending_lock);
-		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
+		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
 		spin_unlock_irq(&qp->s_lock);
 		cancel_work_sync(&priv->s_work);
 		del_timer_sync(&qp->s_timer);
@@ -1318,20 +1318,20 @@ void qib_get_credit(struct rvt_qp *qp, u32 aeth)
 	 * honor the credit field.
 	 */
 	if (credit == QIB_AETH_CREDIT_INVAL) {
-		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
-			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
-			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
-				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
+			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
+			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
 				qib_schedule_send(qp);
 			}
 		}
-	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
 		/* Compute new LSN (i.e., MSN + credit) */
 		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
 		if (qib_cmp24(credit, qp->s_lsn) > 0) {
 			qp->s_lsn = credit;
-			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
-				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
 				qib_schedule_send(qp);
 			}
 		}
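[Editor's note: the qib_get_credit() hunk above is easier to follow with the AETH layout in mind: in qib's encoding the low 24 bits carry the MSN and a 5-bit credit count sits above it, with the all-ones encoding meaning "unlimited credit". A minimal standalone sketch of that decode, with the qib constants inlined as assumptions (QIB_AETH_CREDIT_SHIFT = 24, QIB_AETH_CREDIT_MASK = 0x1f, QIB_AETH_CREDIT_INVAL = 0x1f, QIB_MSN_MASK = 0xffffff):

	/* Standalone model of the decode behind qib_get_credit(); not driver code. */
	#include <stdint.h>

	#define AETH_CREDIT_SHIFT 24        /* assumed QIB_AETH_CREDIT_SHIFT */
	#define AETH_CREDIT_MASK  0x1f      /* assumed QIB_AETH_CREDIT_MASK */
	#define AETH_CREDIT_INVAL 0x1f      /* assumed QIB_AETH_CREDIT_INVAL */
	#define MSN_MASK          0xffffff  /* assumed QIB_MSN_MASK */

	/* Returns 1 and stores the new limit sequence number (LSN) when the
	 * AETH grants finite credit; returns 0 for the "invalid" encoding,
	 * where the caller would set RVT_S_UNLIMITED_CREDIT instead. */
	static int aeth_to_lsn(uint32_t aeth, const uint32_t *credit_table,
			       uint32_t *lsn)
	{
		uint32_t credit = (aeth >> AETH_CREDIT_SHIFT) & AETH_CREDIT_MASK;

		if (credit == AETH_CREDIT_INVAL)
			return 0;
		/* LSN = MSN + decoded credit, modulo the 24-bit PSN space */
		*lsn = (aeth + credit_table[credit]) & MSN_MASK;
		return 1;
	}
]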
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -56,7 +56,7 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
 static void start_timer(struct rvt_qp *qp)
 {
-	qp->s_flags |= QIB_S_TIMER;
+	qp->s_flags |= RVT_S_TIMER;
 	qp->s_timer.function = rc_timeout;
 	/* 4.096 usec. * (1 << qp->timeout) */
 	qp->s_timer.expires = jiffies + qp->timeout_jiffies;
@@ -112,7 +112,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
 	case OP(ACKNOWLEDGE):
 		/* Check for no next entry in the queue. */
 		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
-			if (qp->s_flags & QIB_S_ACK_PENDING)
+			if (qp->s_flags & RVT_S_ACK_PENDING)
 				goto normal;
 			goto bail;
 		}
@@ -196,7 +196,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
 		 * (see above).
 		 */
 		qp->s_ack_state = OP(SEND_ONLY);
-		qp->s_flags &= ~QIB_S_ACK_PENDING;
+		qp->s_flags &= ~RVT_S_ACK_PENDING;
 		qp->s_cur_sge = NULL;
 		if (qp->s_nak_state)
 			ohdr->u.aeth =
@@ -218,7 +218,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
 bail:
 	qp->s_ack_state = OP(ACKNOWLEDGE);
-	qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
+	qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
 	return 0;
 }
@@ -256,7 +256,7 @@ int qib_make_rc_req(struct rvt_qp *qp)
 	spin_lock_irqsave(&qp->s_lock, flags);

 	/* Sending responses has higher priority over sending requests. */
-	if ((qp->s_flags & QIB_S_RESP_PENDING) &&
+	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
 	    qib_make_rc_ack(dev, qp, ohdr, pmtu))
 		goto done;
@@ -268,7 +268,7 @@ int qib_make_rc_req(struct rvt_qp *qp)
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
 		if (atomic_read(&priv->s_dma_busy)) {
-			qp->s_flags |= QIB_S_WAIT_DMA;
+			qp->s_flags |= RVT_S_WAIT_DMA;
 			goto bail;
 		}
 		wqe = get_swqe_ptr(qp, qp->s_last);
@@ -278,12 +278,12 @@ int qib_make_rc_req(struct rvt_qp *qp)
 		goto done;
 	}

-	if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
+	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
 		goto bail;

 	if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
 		if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
-			qp->s_flags |= QIB_S_WAIT_PSN;
+			qp->s_flags |= RVT_S_WAIT_PSN;
 			goto bail;
 		}
 		qp->s_sending_psn = qp->s_psn;
@@ -318,7 +318,7 @@ int qib_make_rc_req(struct rvt_qp *qp)
 			 */
 			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
 			    qp->s_num_rd_atomic) {
-				qp->s_flags |= QIB_S_WAIT_FENCE;
+				qp->s_flags |= RVT_S_WAIT_FENCE;
 				goto bail;
 			}
 			wqe->psn = qp->s_next_psn;
@@ -336,9 +336,9 @@ int qib_make_rc_req(struct rvt_qp *qp)
 		case IB_WR_SEND:
 		case IB_WR_SEND_WITH_IMM:
 			/* If no credit, return. */
-			if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
 			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
-				qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
 				goto bail;
 			}
 			wqe->lpsn = wqe->psn;
@@ -364,14 +364,14 @@ int qib_make_rc_req(struct rvt_qp *qp)
 			break;

 		case IB_WR_RDMA_WRITE:
-			if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
 				qp->s_lsn++;
 			/* FALLTHROUGH */
 		case IB_WR_RDMA_WRITE_WITH_IMM:
 			/* If no credit, return. */
-			if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
 			    qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
-				qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
 				goto bail;
 			}
@@ -412,11 +412,11 @@ int qib_make_rc_req(struct rvt_qp *qp)
 			if (newreq) {
 				if (qp->s_num_rd_atomic >=
 				    qp->s_max_rd_atomic) {
-					qp->s_flags |= QIB_S_WAIT_RDMAR;
+					qp->s_flags |= RVT_S_WAIT_RDMAR;
 					goto bail;
 				}
 				qp->s_num_rd_atomic++;
-				if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
 					qp->s_lsn++;
 				/*
 				 * Adjust s_next_psn to count the
@@ -450,11 +450,11 @@ int qib_make_rc_req(struct rvt_qp *qp)
 			if (newreq) {
 				if (qp->s_num_rd_atomic >=
 				    qp->s_max_rd_atomic) {
-					qp->s_flags |= QIB_S_WAIT_RDMAR;
+					qp->s_flags |= RVT_S_WAIT_RDMAR;
 					goto bail;
 				}
 				qp->s_num_rd_atomic++;
-				if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
 					qp->s_lsn++;
 				wqe->lpsn = wqe->psn;
 			}
@@ -619,9 +619,9 @@ int qib_make_rc_req(struct rvt_qp *qp)
 	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
 	if (delta && delta % QIB_PSN_CREDIT == 0)
 		bth2 |= IB_BTH_REQ_ACK;
-	if (qp->s_flags & QIB_S_SEND_ONE) {
-		qp->s_flags &= ~QIB_S_SEND_ONE;
-		qp->s_flags |= QIB_S_WAIT_ACK;
+	if (qp->s_flags & RVT_S_SEND_ONE) {
+		qp->s_flags &= ~RVT_S_SEND_ONE;
+		qp->s_flags |= RVT_S_WAIT_ACK;
 		bth2 |= IB_BTH_REQ_ACK;
 	}
 	qp->s_len -= len;
@@ -634,7 +634,7 @@ int qib_make_rc_req(struct rvt_qp *qp)
 	goto unlock;

 bail:
-	qp->s_flags &= ~QIB_S_BUSY;
+	qp->s_flags &= ~RVT_S_BUSY;
 unlock:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 	return ret;
@@ -670,7 +670,7 @@ void qib_send_rc_ack(struct rvt_qp *qp)
 		goto unlock;

 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
-	if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
+	if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
 		goto queue_ack;

 	/* Construct the header with s_lock held so APM doesn't change it. */
@@ -761,7 +761,7 @@ void qib_send_rc_ack(struct rvt_qp *qp)
 queue_ack:
 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
 		this_cpu_inc(*ibp->rvp.rc_qacks);
-		qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
+		qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
 		qp->s_nak_state = qp->r_nak_state;
 		qp->s_ack_psn = qp->r_ack_psn;
@@ -855,13 +855,13 @@ static void reset_psn(struct rvt_qp *qp, u32 psn)
 done:
 	qp->s_psn = psn;
 	/*
-	 * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
+	 * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
 	 * asynchronously before the send tasklet can get scheduled.
 	 * Doing it in qib_make_rc_req() is too late.
 	 */
 	if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
 	    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
-		qp->s_flags |= QIB_S_WAIT_PSN;
+		qp->s_flags |= RVT_S_WAIT_PSN;
 }

 /*
@@ -892,11 +892,11 @@ static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
 	else
 		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

-	qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
-			 QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
-			 QIB_S_WAIT_ACK);
+	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
+			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
+			 RVT_S_WAIT_ACK);
 	if (wait)
-		qp->s_flags |= QIB_S_SEND_ONE;
+		qp->s_flags |= RVT_S_SEND_ONE;
 	reset_psn(qp, psn);
 }
@@ -911,10 +911,10 @@ static void rc_timeout(unsigned long arg)
 	spin_lock_irqsave(&qp->r_lock, flags);
 	spin_lock(&qp->s_lock);
-	if (qp->s_flags & QIB_S_TIMER) {
+	if (qp->s_flags & RVT_S_TIMER) {
 		ibp = to_iport(qp->ibqp.device, qp->port_num);
 		ibp->rvp.n_rc_timeouts++;
-		qp->s_flags &= ~QIB_S_TIMER;
+		qp->s_flags &= ~RVT_S_TIMER;
 		del_timer(&qp->s_timer);
 		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
 		qib_schedule_send(qp);
@@ -932,8 +932,8 @@ void qib_rc_rnr_retry(unsigned long arg)
 	unsigned long flags;

 	spin_lock_irqsave(&qp->s_lock, flags);
-	if (qp->s_flags & QIB_S_WAIT_RNR) {
-		qp->s_flags &= ~QIB_S_WAIT_RNR;
+	if (qp->s_flags & RVT_S_WAIT_RNR) {
+		qp->s_flags &= ~RVT_S_WAIT_RNR;
 		del_timer(&qp->s_timer);
 		qib_schedule_send(qp);
 	}
@@ -1003,7 +1003,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
 	 * there are still requests that haven't been acked.
 	 */
 	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
-	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+	    !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
 	    (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
 		start_timer(qp);
@@ -1018,7 +1018,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
 			rvt_put_mr(sge->mr);
 		}
 		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
 		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
 			memset(&wc, 0, sizeof(wc));
 			wc.wr_id = wqe->wr.wr_id;
@@ -1035,9 +1035,9 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
 	 * If we were waiting for sends to complete before resending,
 	 * and they are now complete, restart sending.
 	 */
-	if (qp->s_flags & QIB_S_WAIT_PSN &&
+	if (qp->s_flags & RVT_S_WAIT_PSN &&
 	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
-		qp->s_flags &= ~QIB_S_WAIT_PSN;
+		qp->s_flags &= ~RVT_S_WAIT_PSN;
 		qp->s_sending_psn = qp->s_psn;
 		qp->s_sending_hpsn = qp->s_psn - 1;
 		qib_schedule_send(qp);
@@ -1074,7 +1074,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 			rvt_put_mr(sge->mr);
 		}
 		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
 		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
 			memset(&wc, 0, sizeof(wc));
 			wc.wr_id = wqe->wr.wr_id;
@@ -1138,8 +1138,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 	int diff;

 	/* Remove QP from retry timer */
-	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
-		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
 		del_timer(&qp->s_timer);
 	}
@@ -1187,11 +1187,11 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
 		    (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
 			/* Retry this request. */
-			if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
-				qp->r_flags |= QIB_R_RDMAR_SEQ;
+			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
+				qp->r_flags |= RVT_R_RDMAR_SEQ;
 				qib_restart_rc(qp, qp->s_last_psn + 1, 0);
 				if (list_empty(&qp->rspwait)) {
-					qp->r_flags |= QIB_R_RSP_SEND;
+					qp->r_flags |= RVT_R_RSP_SEND;
 					atomic_inc(&qp->refcount);
 					list_add_tail(&qp->rspwait,
 						      &rcd->qp_wait_list);
@@ -1214,14 +1214,14 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
 			qp->s_num_rd_atomic--;
 			/* Restart sending task if fence is complete */
-			if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
+			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
 			    !qp->s_num_rd_atomic) {
-				qp->s_flags &= ~(QIB_S_WAIT_FENCE |
-						 QIB_S_WAIT_ACK);
+				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
+						 RVT_S_WAIT_ACK);
 				qib_schedule_send(qp);
-			} else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
-				qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
-						 QIB_S_WAIT_ACK);
+			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
+				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
+						 RVT_S_WAIT_ACK);
 				qib_schedule_send(qp);
 			}
 		}
@@ -1249,8 +1249,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 			qp->s_state = OP(SEND_LAST);
 			qp->s_psn = psn + 1;
 		}
-		if (qp->s_flags & QIB_S_WAIT_ACK) {
-			qp->s_flags &= ~QIB_S_WAIT_ACK;
+		if (qp->s_flags & RVT_S_WAIT_ACK) {
+			qp->s_flags &= ~RVT_S_WAIT_ACK;
 			qib_schedule_send(qp);
 		}
 		qib_get_credit(qp, aeth);
@@ -1264,7 +1264,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		ibp->rvp.n_rnr_naks++;
 		if (qp->s_acked == qp->s_tail)
 			goto bail;
-		if (qp->s_flags & QIB_S_WAIT_RNR)
+		if (qp->s_flags & RVT_S_WAIT_RNR)
 			goto bail;
 		if (qp->s_rnr_retry == 0) {
 			status = IB_WC_RNR_RETRY_EXC_ERR;
@@ -1280,8 +1280,8 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
 		reset_psn(qp, psn);

-		qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
-		qp->s_flags |= QIB_S_WAIT_RNR;
+		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
+		qp->s_flags |= RVT_S_WAIT_RNR;
 		qp->s_timer.function = qib_rc_rnr_retry;
 		qp->s_timer.expires = jiffies + usecs_to_jiffies(
 			ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
@@ -1356,8 +1356,8 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
 	struct rvt_swqe *wqe;

 	/* Remove QP from retry timer */
-	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
-		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
 		del_timer(&qp->s_timer);
 	}
@@ -1372,10 +1372,10 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
 	}

 	ibp->rvp.n_rdma_seq++;
-	qp->r_flags |= QIB_R_RDMAR_SEQ;
+	qp->r_flags |= RVT_R_RDMAR_SEQ;
 	qib_restart_rc(qp, qp->s_last_psn + 1, 0);
 	if (list_empty(&qp->rspwait)) {
-		qp->r_flags |= QIB_R_RSP_SEND;
+		qp->r_flags |= RVT_R_RSP_SEND;
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
@@ -1426,7 +1426,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 		 * If send tasklet not running attempt to progress
 		 * SDMA queue.
 		 */
-		if (!(qp->s_flags & QIB_S_BUSY)) {
+		if (!(qp->s_flags & RVT_S_BUSY)) {
 			/* Acquire SDMA Lock */
 			spin_lock_irqsave(&ppd->sdma_lock, flags);
 			/* Invoke sdma make progress */
@@ -1461,10 +1461,10 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 	 * Skip everything other than the PSN we expect, if we are waiting
 	 * for a reply to a restarted RDMA read or atomic op.
 	 */
-	if (qp->r_flags & QIB_R_RDMAR_SEQ) {
+	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
 		if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
 			goto ack_done;
-		qp->r_flags &= ~QIB_R_RDMAR_SEQ;
+		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
 	}

 	if (unlikely(qp->s_acked == qp->s_tail))
@@ -1516,10 +1516,10 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 		 * We got a response so update the timeout.
 		 * 4.096 usec. * (1 << qp->timeout)
 		 */
-		qp->s_flags |= QIB_S_TIMER;
+		qp->s_flags |= RVT_S_TIMER;
 		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
-		if (qp->s_flags & QIB_S_WAIT_ACK) {
-			qp->s_flags &= ~QIB_S_WAIT_ACK;
+		if (qp->s_flags & RVT_S_WAIT_ACK) {
+			qp->s_flags &= ~RVT_S_WAIT_ACK;
 			qib_schedule_send(qp);
 		}
@@ -1653,7 +1653,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 	 * Otherwise, we end up propagating congestion.
 	 */
 	if (list_empty(&qp->rspwait)) {
-		qp->r_flags |= QIB_R_RSP_NAK;
+		qp->r_flags |= RVT_R_RSP_NAK;
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
@@ -1792,7 +1792,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 		 * which doesn't accept a RDMA read response or atomic
 		 * response as an ACK for earlier SENDs or RDMA writes.
 		 */
-		if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
+		if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
 			spin_unlock_irqrestore(&qp->s_lock, flags);
 			qp->r_nak_state = 0;
 			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
@@ -1806,7 +1806,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 		break;
 	}
 	qp->s_ack_state = OP(ACKNOWLEDGE);
-	qp->s_flags |= QIB_S_RESP_PENDING;
+	qp->s_flags |= RVT_S_RESP_PENDING;
 	qp->r_nak_state = 0;
 	qib_schedule_send(qp);
@@ -1949,8 +1949,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 		break;
 	}

-	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
-		qp->r_flags |= QIB_R_COMM_EST;
+	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
+		qp->r_flags |= RVT_R_COMM_EST;
 		if (qp->ibqp.event_handler) {
 			struct ib_event ev;
@@ -2029,7 +2029,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 		qib_copy_sge(&qp->r_sge, data, tlen, 1);
 		qib_put_ss(&qp->r_sge);
 		qp->r_msn++;
-		if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
 			break;
 		wc.wr_id = qp->r_wr_id;
 		wc.status = IB_WC_SUCCESS;
@@ -2158,7 +2158,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 			qp->r_head_ack_queue = next;

 		/* Schedule the send tasklet. */
-		qp->s_flags |= QIB_S_RESP_PENDING;
+		qp->s_flags |= RVT_S_RESP_PENDING;
 		qib_schedule_send(qp);

 		goto sunlock;
@@ -2222,7 +2222,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 			qp->r_head_ack_queue = next;

 		/* Schedule the send tasklet. */
-		qp->s_flags |= QIB_S_RESP_PENDING;
+		qp->s_flags |= RVT_S_RESP_PENDING;
 		qib_schedule_send(qp);

 		goto sunlock;
@@ -2246,7 +2246,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	qp->r_ack_psn = qp->r_psn;
 	/* Queue RNR NAK for later */
 	if (list_empty(&qp->rspwait)) {
-		qp->r_flags |= QIB_R_RSP_NAK;
+		qp->r_flags |= RVT_R_RSP_NAK;
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
@@ -2258,7 +2258,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	qp->r_ack_psn = qp->r_psn;
 	/* Queue NAK for later */
 	if (list_empty(&qp->rspwait)) {
-		qp->r_flags |= QIB_R_RSP_NAK;
+		qp->r_flags |= RVT_R_RSP_NAK;
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
@@ -2272,7 +2272,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
 	qp->r_ack_psn = qp->r_psn;
 	/* Queue NAK for later */
 	if (list_empty(&qp->rspwait)) {
-		qp->r_flags |= QIB_R_RSP_NAK;
+		qp->r_flags |= RVT_R_RSP_NAK;
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
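[Editor's note: one idiom appears in half a dozen hunks above: when the receive path wants to emit a NAK or reschedule the sender but cannot do it inline, it marks the QP with RVT_R_RSP_NAK or RVT_R_RSP_SEND, takes a reference, and parks it on the context's qp_wait_list; qib_kreceive() in the first file later drains that list. Condensed into a sketch that mirrors the diff (the helper name is illustrative, not a function in the driver):

	/* Deferred-response idiom used throughout qib_rc.c above. */
	static void queue_deferred_rsp(struct rvt_qp *qp, struct qib_ctxtdata *rcd,
				       u32 flag) /* RVT_R_RSP_NAK or RVT_R_RSP_SEND */
	{
		if (list_empty(&qp->rspwait)) {
			qp->r_flags |= flag;
			atomic_inc(&qp->refcount); /* list holds a QP reference */
			list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
		}
	}
]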
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -190,7 +190,7 @@ int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
 	qp->r_wr_id = wqe->wr_id;

 	ret = 1;
-	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
+	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
 	if (handler) {
 		u32 n;
@@ -378,11 +378,11 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 	spin_lock_irqsave(&sqp->s_lock, flags);

 	/* Return if we are already busy processing a work request. */
-	if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
+	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
 	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
 		goto unlock;

-	sqp->s_flags |= QIB_S_BUSY;
+	sqp->s_flags |= RVT_S_BUSY;

 again:
 	if (sqp->s_last == sqp->s_head)
@@ -547,7 +547,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 	if (release)
 		qib_put_ss(&qp->r_sge);

-	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
 		goto send_comp;

 	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
@@ -592,7 +592,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 	spin_lock_irqsave(&sqp->s_lock, flags);
 	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
 		goto clr_busy;
-	sqp->s_flags |= QIB_S_WAIT_RNR;
+	sqp->s_flags |= RVT_S_WAIT_RNR;
 	sqp->s_timer.function = qib_rc_rnr_retry;
 	sqp->s_timer.expires = jiffies +
 		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
@@ -622,7 +622,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 	if (sqp->ibqp.qp_type == IB_QPT_RC) {
 		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

-		sqp->s_flags &= ~QIB_S_BUSY;
+		sqp->s_flags &= ~RVT_S_BUSY;
 		spin_unlock_irqrestore(&sqp->s_lock, flags);
 		if (lastwqe) {
 			struct ib_event ev;
@@ -635,7 +635,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 		goto done;
 	}
 clr_busy:
-	sqp->s_flags &= ~QIB_S_BUSY;
+	sqp->s_flags &= ~RVT_S_BUSY;
 unlock:
 	spin_unlock_irqrestore(&sqp->s_lock, flags);
 done:
@@ -751,7 +751,7 @@ void qib_do_send(struct work_struct *work)
 			return;
 		}

-		qp->s_flags |= QIB_S_BUSY;
+		qp->s_flags |= RVT_S_BUSY;

 		spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -794,7 +794,7 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

 	/* See ch. 11.2.4.1 and 10.7.3.1 */
-	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
 	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
 	    status != IB_WC_SUCCESS) {
 		struct ib_wc wc;
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -703,11 +703,11 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
 			ibp = &ppd->ibport_data;
 			ibp->rvp.n_dmawait++;
-			qp->s_flags |= QIB_S_WAIT_DMA_DESC;
+			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
 			list_add_tail(&priv->iowait, &dev->dmawait);
 		}
 		spin_unlock(&dev->rdi.pending_lock);
-		qp->s_flags &= ~QIB_S_BUSY;
+		qp->s_flags &= ~RVT_S_BUSY;
 		spin_unlock(&qp->s_lock);
 		ret = -EBUSY;
 	} else {
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -65,7 +65,7 @@ int qib_make_uc_req(struct rvt_qp *qp)
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
 		if (atomic_read(&priv->s_dma_busy)) {
-			qp->s_flags |= QIB_S_WAIT_DMA;
+			qp->s_flags |= RVT_S_WAIT_DMA;
 			goto bail;
 		}
 		wqe = get_swqe_ptr(qp, qp->s_last);
@@ -221,7 +221,7 @@ int qib_make_uc_req(struct rvt_qp *qp)
 	goto unlock;

 bail:
-	qp->s_flags &= ~QIB_S_BUSY;
+	qp->s_flags &= ~RVT_S_BUSY;
 unlock:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 	return ret;
@@ -279,7 +279,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 inv:
 		if (qp->r_state == OP(SEND_FIRST) ||
 		    qp->r_state == OP(SEND_MIDDLE)) {
-			set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+			set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
 			qp->r_sge.num_sge = 0;
 		} else
 			qib_put_ss(&qp->r_sge);
@@ -329,8 +329,8 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		goto inv;
 	}

-	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
-		qp->r_flags |= QIB_R_COMM_EST;
+	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
+		qp->r_flags |= RVT_R_COMM_EST;
 		if (qp->ibqp.event_handler) {
 			struct ib_event ev;
@@ -347,7 +347,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	case OP(SEND_ONLY):
 	case OP(SEND_ONLY_WITH_IMMEDIATE):
 send_first:
-		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
 			qp->r_sge = qp->s_rdma_read_sge;
 		else {
 			ret = qib_get_rwqe(qp, 0);
@@ -484,7 +484,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		tlen -= (hdrsize + pad + 4);
 		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
 			goto drop;
-		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
 			qib_put_ss(&qp->s_rdma_read_sge);
 		else {
 			ret = qib_get_rwqe(qp, 1);
@@ -524,7 +524,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	return;

 rewind:
-	set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+	set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
 	qp->r_sge.num_sge = 0;
 drop:
 	ibp->rvp.n_pkt_drops++;
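[Editor's note: the UC hunks above use both flavors of receive flag. RVT_R_REWIND_SGE and RVT_R_WRID_VALID are bit *numbers* in qp->r_aflags, manipulated with atomic bitops (set_bit(), test_and_clear_bit()), while RVT_R_REUSE_SGE and the other r_flags values are plain masks updated under a lock. A runnable userspace model of that distinction — the names are local stand-ins, not the rdmavt macros:

	#include <stdatomic.h>
	#include <stdio.h>

	#define R_WRID_VALID 0     /* bit number, like RVT_R_WRID_VALID */
	#define R_REWIND_SGE 1     /* bit number, like RVT_R_REWIND_SGE */
	#define R_REUSE_SGE  0x01  /* mask, like RVT_R_REUSE_SGE */

	static atomic_ulong r_aflags; /* atomic bitops; no lock needed */
	static unsigned long r_flags; /* updated under qp->r_lock in the driver */

	/* Userspace analogue of the kernel's test_and_clear_bit(). */
	static int test_and_clear(atomic_ulong *word, int bit)
	{
		unsigned long mask = 1UL << bit;
		return (atomic_fetch_and(word, ~mask) & mask) != 0;
	}

	int main(void)
	{
		atomic_fetch_or(&r_aflags, 1UL << R_REWIND_SGE); /* set_bit() */
		r_flags |= R_REUSE_SGE;                          /* plain mask update */
		printf("rewind was set: %d\n",
		       test_and_clear(&r_aflags, R_REWIND_SGE));
		return 0;
	}
]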
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -141,8 +141,8 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	/*
 	 * Get the next work request entry to find where to put the data.
 	 */
-	if (qp->r_flags & QIB_R_REUSE_SGE)
-		qp->r_flags &= ~QIB_R_REUSE_SGE;
+	if (qp->r_flags & RVT_R_REUSE_SGE)
+		qp->r_flags &= ~RVT_R_REUSE_SGE;
 	else {
 		int ret;
@@ -159,7 +159,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	}
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
-		qp->r_flags |= QIB_R_REUSE_SGE;
+		qp->r_flags |= RVT_R_REUSE_SGE;
 		ibp->rvp.n_pkt_drops++;
 		goto bail_unlock;
 	}
@@ -203,7 +203,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 		length -= len;
 	}
 	qib_put_ss(&qp->r_sge);
-	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
 		goto bail_unlock;
 	wc.wr_id = qp->r_wr_id;
 	wc.status = IB_WC_SUCCESS;
@@ -260,7 +260,7 @@ int qib_make_ud_req(struct rvt_qp *qp)
 			goto bail;
 		/* If DMAs are in progress, we can't flush immediately. */
 		if (atomic_read(&priv->s_dma_busy)) {
-			qp->s_flags |= QIB_S_WAIT_DMA;
+			qp->s_flags |= RVT_S_WAIT_DMA;
 			goto bail;
 		}
 		wqe = get_swqe_ptr(qp, qp->s_last);
@@ -297,7 +297,7 @@ int qib_make_ud_req(struct rvt_qp *qp)
 			 * zero length descriptor so we get a callback.
 			 */
 			if (atomic_read(&priv->s_dma_busy)) {
-				qp->s_flags |= QIB_S_WAIT_DMA;
+				qp->s_flags |= RVT_S_WAIT_DMA;
 				goto bail;
 			}
 			qp->s_cur = next_cur;
@@ -389,7 +389,7 @@ int qib_make_ud_req(struct rvt_qp *qp)
 	goto unlock;

 bail:
-	qp->s_flags &= ~QIB_S_BUSY;
+	qp->s_flags &= ~RVT_S_BUSY;
 unlock:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
 	return ret;
@@ -534,8 +534,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	/*
 	 * Get the next work request entry to find where to put the data.
 	 */
-	if (qp->r_flags & QIB_R_REUSE_SGE)
-		qp->r_flags &= ~QIB_R_REUSE_SGE;
+	if (qp->r_flags & RVT_R_REUSE_SGE)
+		qp->r_flags &= ~RVT_R_REUSE_SGE;
 	else {
 		int ret;
@@ -552,7 +552,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 	}
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
-		qp->r_flags |= QIB_R_REUSE_SGE;
+		qp->r_flags |= RVT_R_REUSE_SGE;
 		goto drop;
 	}
 	if (has_grh) {
@@ -563,7 +563,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
 	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
 	qib_put_ss(&qp->r_sge);
-	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
 		return;
 	wc.wr_id = qp->r_wr_id;
 	wc.status = IB_WC_SUCCESS;
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -734,8 +734,8 @@ static void mem_timer(unsigned long data)
 	if (qp) {
 		spin_lock_irqsave(&qp->s_lock, flags);
-		if (qp->s_flags & QIB_S_WAIT_KMEM) {
-			qp->s_flags &= ~QIB_S_WAIT_KMEM;
+		if (qp->s_flags & RVT_S_WAIT_KMEM) {
+			qp->s_flags &= ~RVT_S_WAIT_KMEM;
 			qib_schedule_send(qp);
 		}
 		spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -958,10 +958,10 @@ static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
 		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
 		    list_empty(&priv->iowait)) {
 			dev->n_txwait++;
-			qp->s_flags |= QIB_S_WAIT_TX;
+			qp->s_flags |= RVT_S_WAIT_TX;
 			list_add_tail(&priv->iowait, &dev->txwait);
 		}
-		qp->s_flags &= ~QIB_S_BUSY;
+		qp->s_flags &= ~RVT_S_BUSY;
 		spin_unlock(&dev->rdi.pending_lock);
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 		tx = ERR_PTR(-EBUSY);
@@ -1030,8 +1030,8 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
 		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

 		spin_lock_irqsave(&qp->s_lock, flags);
-		if (qp->s_flags & QIB_S_WAIT_TX) {
-			qp->s_flags &= ~QIB_S_WAIT_TX;
+		if (qp->s_flags & RVT_S_WAIT_TX) {
+			qp->s_flags &= ~RVT_S_WAIT_TX;
 			qib_schedule_send(qp);
 		}
 		spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1081,8 +1081,8 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
 	for (i = 0; i < n; i++) {
 		qp = qps[i];
 		spin_lock(&qp->s_lock);
-		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
-			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
+		if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
+			qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
 			qib_schedule_send(qp);
 		}
 		spin_unlock(&qp->s_lock);
@@ -1119,8 +1119,8 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
 	if (atomic_dec_and_test(&priv->s_dma_busy)) {
 		if (qp->state == IB_QPS_RESET)
 			wake_up(&priv->wait_dma);
-		else if (qp->s_flags & QIB_S_WAIT_DMA) {
-			qp->s_flags &= ~QIB_S_WAIT_DMA;
+		else if (qp->s_flags & RVT_S_WAIT_DMA) {
+			qp->s_flags &= ~RVT_S_WAIT_DMA;
 			qib_schedule_send(qp);
 		}
 	}
@@ -1141,11 +1141,11 @@ static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
 		if (list_empty(&priv->iowait)) {
 			if (list_empty(&dev->memwait))
 				mod_timer(&dev->mem_timer, jiffies + 1);
-			qp->s_flags |= QIB_S_WAIT_KMEM;
+			qp->s_flags |= RVT_S_WAIT_KMEM;
 			list_add_tail(&priv->iowait, &dev->memwait);
 		}
 		spin_unlock(&dev->rdi.pending_lock);
-		qp->s_flags &= ~QIB_S_BUSY;
+		qp->s_flags &= ~RVT_S_BUSY;
 		ret = -EBUSY;
 	}
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1277,13 +1277,13 @@ static int no_bufs_available(struct rvt_qp *qp)
 		spin_lock(&dev->rdi.pending_lock);
 		if (list_empty(&priv->iowait)) {
 			dev->n_piowait++;
-			qp->s_flags |= QIB_S_WAIT_PIO;
+			qp->s_flags |= RVT_S_WAIT_PIO;
 			list_add_tail(&priv->iowait, &dev->piowait);
 			dd = dd_from_dev(dev);
 			dd->f_wantpiobuf_intr(dd, 1);
 		}
 		spin_unlock(&dev->rdi.pending_lock);
-		qp->s_flags &= ~QIB_S_BUSY;
+		qp->s_flags &= ~RVT_S_BUSY;
 		ret = -EBUSY;
 	}
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1396,7 +1396,7 @@ static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
  * @len: the length of the packet in bytes
  *
  * Return zero if packet is sent or queued OK.
- * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
+ * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
  */
 int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
 		   u32 hdrwords, struct rvt_sge_state *ss, u32 len)
@@ -1564,8 +1564,8 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
 		qp = qps[i];
 		spin_lock_irqsave(&qp->s_lock, flags);
-		if (qp->s_flags & QIB_S_WAIT_PIO) {
-			qp->s_flags &= ~QIB_S_WAIT_PIO;
+		if (qp->s_flags & RVT_S_WAIT_PIO) {
+			qp->s_flags &= ~RVT_S_WAIT_PIO;
 			qib_schedule_send(qp);
 		}
 		spin_unlock_irqrestore(&qp->s_lock, flags);
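[Editor's note: the qib_verbs.c hunks above are all instances of one wait/wake pattern. On resource exhaustion (PIO buffers, txreqs, kernel memory, SDMA descriptors) the QP is queued on a per-device wait list under dev->rdi.pending_lock with an RVT_S_WAIT_* bit set and RVT_S_BUSY cleared, and the matching availability callback clears the bit and reschedules the send engine. Condensed into a sketch (the helper names are illustrative, not functions in the driver):

	/* Wait side, as in wait_kmem()/no_bufs_available()/__get_txreq(): */
	static int wait_for_resource(struct rvt_qp *qp, struct qib_ibdev *dev,
				     struct list_head *waitlist, u32 wait_flag)
	{
		struct qib_qp_priv *priv = qp->priv;

		spin_lock(&dev->rdi.pending_lock);
		if (list_empty(&priv->iowait)) {
			qp->s_flags |= wait_flag;	/* e.g. RVT_S_WAIT_PIO */
			list_add_tail(&priv->iowait, waitlist);
		}
		spin_unlock(&dev->rdi.pending_lock);
		qp->s_flags &= ~RVT_S_BUSY;		/* allow rescheduling */
		return -EBUSY;
	}

	/* Wake side, as in qib_ib_piobufavail()/qib_put_txreq()/mem_timer(): */
	static void resource_available(struct rvt_qp *qp, u32 wait_flag)
	{
		spin_lock(&qp->s_lock);	/* _irqsave variants in the driver */
		if (qp->s_flags & wait_flag) {
			qp->s_flags &= ~wait_flag;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
	}
]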
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -262,78 +262,6 @@ struct qib_qp_priv {
 	struct rvt_qp *owner;
 };

-/*
- * Atomic bit definitions for r_aflags.
- */
-#define QIB_R_WRID_VALID 0
-#define QIB_R_REWIND_SGE 1
-
-/*
- * Bit definitions for r_flags.
- */
-#define QIB_R_REUSE_SGE 0x01
-#define QIB_R_RDMAR_SEQ 0x02
-#define QIB_R_RSP_NAK 0x04
-#define QIB_R_RSP_SEND 0x08
-#define QIB_R_COMM_EST 0x10
-
-/*
- * Bit definitions for s_flags.
- *
- * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
- * QIB_S_BUSY - send tasklet is processing the QP
- * QIB_S_TIMER - the RC retry timer is active
- * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
- * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
- *                    before processing the next SWQE
- * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
- *                    before processing the next SWQE
- * QIB_S_WAIT_RNR - waiting for RNR timeout
- * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
- * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
- *                  next send completion entry not via send DMA
- * QIB_S_WAIT_PIO - waiting for a send buffer to be available
- * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
- * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
- * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
- * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
- * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
- * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
- */
-#define QIB_S_SIGNAL_REQ_WR 0x0001
-#define QIB_S_BUSY 0x0002
-#define QIB_S_TIMER 0x0004
-#define QIB_S_RESP_PENDING 0x0008
-#define QIB_S_ACK_PENDING 0x0010
-#define QIB_S_WAIT_FENCE 0x0020
-#define QIB_S_WAIT_RDMAR 0x0040
-#define QIB_S_WAIT_RNR 0x0080
-#define QIB_S_WAIT_SSN_CREDIT 0x0100
-#define QIB_S_WAIT_DMA 0x0200
-#define QIB_S_WAIT_PIO 0x0400
-#define QIB_S_WAIT_TX 0x0800
-#define QIB_S_WAIT_DMA_DESC 0x1000
-#define QIB_S_WAIT_KMEM 0x2000
-#define QIB_S_WAIT_PSN 0x4000
-#define QIB_S_WAIT_ACK 0x8000
-#define QIB_S_SEND_ONE 0x10000
-#define QIB_S_UNLIMITED_CREDIT 0x20000
-
-/*
- * Wait flags that would prevent any packet type from being sent.
- */
-#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
-	QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
-
-/*
- * Wait flags that would prevent send work requests from making progress.
- */
-#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
-	QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
-	QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
-
-#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
-
 #define QIB_PSN_CREDIT 16

 /*
@@ -473,9 +401,9 @@ static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
  */
 static inline int qib_send_ok(struct rvt_qp *qp)
 {
-	return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
-		(qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
-		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
+	return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
+		(qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) ||
+		 !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
 }

 /*