Commit 54d10c1e authored by Dennis Dalessandro, committed by Doug Ledford

staging/rdma/hfi1: Use rdmavt send flags and recv flags

Use the definitions of the s_flags and r_flags which are now in rdmavt.
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 90963ad7
...@@ -781,14 +781,14 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet) ...@@ -781,14 +781,14 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
*/ */
list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
list_del_init(&qp->rspwait); list_del_init(&qp->rspwait);
if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) { if (qp->r_flags & RVT_R_RSP_NAK) {
qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK; qp->r_flags &= ~RVT_R_RSP_NAK;
hfi1_send_rc_ack(rcd, qp, 0); hfi1_send_rc_ack(rcd, qp, 0);
} }
if (qp->r_flags & HFI1_R_RSP_SEND) { if (qp->r_flags & RVT_R_RSP_SEND) {
unsigned long flags; unsigned long flags;
qp->r_flags &= ~HFI1_R_RSP_SEND; qp->r_flags &= ~RVT_R_RSP_SEND;
spin_lock_irqsave(&qp->s_lock, flags); spin_lock_irqsave(&qp->s_lock, flags);
if (ib_hfi1_state_ops[qp->state] & if (ib_hfi1_state_ops[qp->state] &
HFI1_PROCESS_OR_FLUSH_SEND) HFI1_PROCESS_OR_FLUSH_SEND)
......
...@@ -1564,7 +1564,7 @@ static void sc_piobufavail(struct send_context *sc) ...@@ -1564,7 +1564,7 @@ static void sc_piobufavail(struct send_context *sc)
write_sequnlock_irqrestore(&dev->iowait_lock, flags); write_sequnlock_irqrestore(&dev->iowait_lock, flags);
for (i = 0; i < n; i++) for (i = 0; i < n; i++)
hfi1_qp_wakeup(qps[i], HFI1_S_WAIT_PIO); hfi1_qp_wakeup(qps[i], RVT_S_WAIT_PIO);
} }
/* translate a send credit update to a bit code of reasons */ /* translate a send credit update to a bit code of reasons */
......
...@@ -360,7 +360,7 @@ static void reset_qp(struct rvt_qp *qp, enum ib_qp_type type) ...@@ -360,7 +360,7 @@ static void reset_qp(struct rvt_qp *qp, enum ib_qp_type type)
hfi1_do_send, hfi1_do_send,
iowait_sleep, iowait_sleep,
iowait_wakeup); iowait_wakeup);
qp->s_flags &= HFI1_S_SIGNAL_REQ_WR; qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
qp->s_hdrwords = 0; qp->s_hdrwords = 0;
qp->s_wqe = NULL; qp->s_wqe = NULL;
qp->s_draining = 0; qp->s_draining = 0;
...@@ -407,7 +407,7 @@ static void clear_mr_refs(struct rvt_qp *qp, int clr_sends) ...@@ -407,7 +407,7 @@ static void clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{ {
unsigned n; unsigned n;
if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags)) if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
hfi1_put_ss(&qp->s_rdma_read_sge); hfi1_put_ss(&qp->s_rdma_read_sge);
hfi1_put_ss(&qp->r_sge); hfi1_put_ss(&qp->r_sge);
...@@ -471,24 +471,24 @@ int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err) ...@@ -471,24 +471,24 @@ int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
qp->state = IB_QPS_ERR; qp->state = IB_QPS_ERR;
if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) { if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR); qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
del_timer(&qp->s_timer); del_timer(&qp->s_timer);
} }
if (qp->s_flags & HFI1_S_ANY_WAIT_SEND) if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND; qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
write_seqlock(&dev->iowait_lock); write_seqlock(&dev->iowait_lock);
if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) { if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
qp->s_flags &= ~HFI1_S_ANY_WAIT_IO; qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
list_del_init(&priv->s_iowait.list); list_del_init(&priv->s_iowait.list);
if (atomic_dec_and_test(&qp->refcount)) if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait); wake_up(&qp->wait);
} }
write_sequnlock(&dev->iowait_lock); write_sequnlock(&dev->iowait_lock);
if (!(qp->s_flags & HFI1_S_BUSY)) { if (!(qp->s_flags & RVT_S_BUSY)) {
qp->s_hdrwords = 0; qp->s_hdrwords = 0;
if (qp->s_rdma_mr) { if (qp->s_rdma_mr) {
rvt_put_mr(qp->s_rdma_mr); rvt_put_mr(qp->s_rdma_mr);
...@@ -507,7 +507,7 @@ int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err) ...@@ -507,7 +507,7 @@ int hfi1_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
wc.qp = &qp->ibqp; wc.qp = &qp->ibqp;
wc.opcode = IB_WC_RECV; wc.opcode = IB_WC_RECV;
if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) { if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
wc.wr_id = qp->r_wr_id; wc.wr_id = qp->r_wr_id;
wc.status = err; wc.status = err;
hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
...@@ -742,7 +742,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -742,7 +742,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (qp->state != IB_QPS_RESET) { if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET; qp->state = IB_QPS_RESET;
flush_iowait(qp); flush_iowait(qp);
qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT); qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
spin_unlock(&qp->s_lock); spin_unlock(&qp->s_lock);
spin_unlock_irq(&qp->r_lock); spin_unlock_irq(&qp->r_lock);
/* Stop the sending work queue and retry timer */ /* Stop the sending work queue and retry timer */
...@@ -762,7 +762,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -762,7 +762,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
case IB_QPS_RTR: case IB_QPS_RTR:
/* Allow event to re-trigger if QP set to RTR more than once */ /* Allow event to re-trigger if QP set to RTR more than once */
qp->r_flags &= ~HFI1_R_COMM_EST; qp->r_flags &= ~RVT_R_COMM_EST;
qp->state = new_state; qp->state = new_state;
break; break;
...@@ -828,7 +828,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -828,7 +828,7 @@ int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp->remote_ah_attr = qp->alt_ah_attr; qp->remote_ah_attr = qp->alt_ah_attr;
qp->port_num = qp->alt_ah_attr.port_num; qp->port_num = qp->alt_ah_attr.port_num;
qp->s_pkey_index = qp->s_alt_pkey_index; qp->s_pkey_index = qp->s_alt_pkey_index;
qp->s_flags |= HFI1_S_AHG_CLEAR; qp->s_flags |= RVT_S_AHG_CLEAR;
priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr); priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
} }
...@@ -954,7 +954,7 @@ int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -954,7 +954,7 @@ int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->recv_cq = qp->ibqp.recv_cq; init_attr->recv_cq = qp->ibqp.recv_cq;
init_attr->srq = qp->ibqp.srq; init_attr->srq = qp->ibqp.srq;
init_attr->cap = attr->cap; init_attr->cap = attr->cap;
if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR) if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
else else
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
...@@ -1154,7 +1154,7 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd, ...@@ -1154,7 +1154,7 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
qp->s_size = init_attr->cap.max_send_wr + 1; qp->s_size = init_attr->cap.max_send_wr + 1;
qp->s_max_sge = init_attr->cap.max_send_sge; qp->s_max_sge = init_attr->cap.max_send_sge;
if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
qp->s_flags = HFI1_S_SIGNAL_REQ_WR; qp->s_flags = RVT_S_SIGNAL_REQ_WR;
dev = to_idev(ibpd->device); dev = to_idev(ibpd->device);
dd = dd_from_dev(dev); dd = dd_from_dev(dev);
err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type, err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type,
...@@ -1292,7 +1292,7 @@ int hfi1_destroy_qp(struct ib_qp *ibqp) ...@@ -1292,7 +1292,7 @@ int hfi1_destroy_qp(struct ib_qp *ibqp)
if (qp->state != IB_QPS_RESET) { if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET; qp->state = IB_QPS_RESET;
flush_iowait(qp); flush_iowait(qp);
qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT); qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
spin_unlock(&qp->s_lock); spin_unlock(&qp->s_lock);
spin_unlock_irq(&qp->r_lock); spin_unlock_irq(&qp->r_lock);
cancel_work_sync(&priv->s_iowait.iowork); cancel_work_sync(&priv->s_iowait.iowork);
...@@ -1398,20 +1398,20 @@ void hfi1_get_credit(struct rvt_qp *qp, u32 aeth) ...@@ -1398,20 +1398,20 @@ void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
* honor the credit field. * honor the credit field.
*/ */
if (credit == HFI1_AETH_CREDIT_INVAL) { if (credit == HFI1_AETH_CREDIT_INVAL) {
if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) { if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
qp->s_flags |= HFI1_S_UNLIMITED_CREDIT; qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) { if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT; qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
hfi1_schedule_send(qp); hfi1_schedule_send(qp);
} }
} }
} else if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) { } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
/* Compute new LSN (i.e., MSN + credit) */ /* Compute new LSN (i.e., MSN + credit) */
credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK; credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
if (cmp_msn(credit, qp->s_lsn) > 0) { if (cmp_msn(credit, qp->s_lsn) > 0) {
qp->s_lsn = credit; qp->s_lsn = credit;
if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) { if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT; qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
hfi1_schedule_send(qp); hfi1_schedule_send(qp);
} }
} }
...@@ -1469,13 +1469,13 @@ static int iowait_sleep( ...@@ -1469,13 +1469,13 @@ static int iowait_sleep(
to_iport(qp->ibqp.device, qp->port_num); to_iport(qp->ibqp.device, qp->port_num);
ibp->rvp.n_dmawait++; ibp->rvp.n_dmawait++;
qp->s_flags |= HFI1_S_WAIT_DMA_DESC; qp->s_flags |= RVT_S_WAIT_DMA_DESC;
list_add_tail(&priv->s_iowait.list, &sde->dmawait); list_add_tail(&priv->s_iowait.list, &sde->dmawait);
trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC); trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
atomic_inc(&qp->refcount); atomic_inc(&qp->refcount);
} }
write_sequnlock(&dev->iowait_lock); write_sequnlock(&dev->iowait_lock);
qp->s_flags &= ~HFI1_S_BUSY; qp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
ret = -EBUSY; ret = -EBUSY;
} else { } else {
...@@ -1495,7 +1495,7 @@ static void iowait_wakeup(struct iowait *wait, int reason) ...@@ -1495,7 +1495,7 @@ static void iowait_wakeup(struct iowait *wait, int reason)
struct rvt_qp *qp = iowait_to_qp(wait); struct rvt_qp *qp = iowait_to_qp(wait);
WARN_ON(reason != SDMA_AVAIL_REASON); WARN_ON(reason != SDMA_AVAIL_REASON);
hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC); hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
} }
int hfi1_qp_init(struct hfi1_ibdev *dev) int hfi1_qp_init(struct hfi1_ibdev *dev)
...@@ -1712,7 +1712,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) ...@@ -1712,7 +1712,7 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
void qp_comm_est(struct rvt_qp *qp) void qp_comm_est(struct rvt_qp *qp)
{ {
qp->r_flags |= HFI1_R_COMM_EST; qp->r_flags |= RVT_R_COMM_EST;
if (qp->ibqp.event_handler) { if (qp->ibqp.event_handler) {
struct ib_event ev; struct ib_event ev;
...@@ -1736,7 +1736,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp) ...@@ -1736,7 +1736,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp)
qp->remote_ah_attr = qp->alt_ah_attr; qp->remote_ah_attr = qp->alt_ah_attr;
qp->port_num = qp->alt_ah_attr.port_num; qp->port_num = qp->alt_ah_attr.port_num;
qp->s_pkey_index = qp->s_alt_pkey_index; qp->s_pkey_index = qp->s_alt_pkey_index;
qp->s_flags |= HFI1_S_AHG_CLEAR; qp->s_flags |= RVT_S_AHG_CLEAR;
priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr); priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc); priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
......
...@@ -125,7 +125,7 @@ static inline void clear_ahg(struct rvt_qp *qp) ...@@ -125,7 +125,7 @@ static inline void clear_ahg(struct rvt_qp *qp)
struct hfi1_qp_priv *priv = qp->priv; struct hfi1_qp_priv *priv = qp->priv;
priv->s_hdr->ahgcount = 0; priv->s_hdr->ahgcount = 0;
qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR); qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR);
if (priv->s_sde && qp->s_ahgidx >= 0) if (priv->s_sde && qp->s_ahgidx >= 0)
sdma_ahg_free(priv->s_sde, qp->s_ahgidx); sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
qp->s_ahgidx = -1; qp->s_ahgidx = -1;
......
This diff is collapsed.
...@@ -208,7 +208,7 @@ int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only) ...@@ -208,7 +208,7 @@ int hfi1_get_rwqe(struct rvt_qp *qp, int wr_id_only)
qp->r_wr_id = wqe->wr_id; qp->r_wr_id = wqe->wr_id;
ret = 1; ret = 1;
set_bit(HFI1_R_WRID_VALID, &qp->r_aflags); set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
if (handler) { if (handler) {
u32 n; u32 n;
...@@ -382,11 +382,11 @@ static void ruc_loopback(struct rvt_qp *sqp) ...@@ -382,11 +382,11 @@ static void ruc_loopback(struct rvt_qp *sqp)
spin_lock_irqsave(&sqp->s_lock, flags); spin_lock_irqsave(&sqp->s_lock, flags);
/* Return if we are already busy processing a work request. */ /* Return if we are already busy processing a work request. */
if ((sqp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT)) || if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_OR_FLUSH_SEND)) !(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_OR_FLUSH_SEND))
goto unlock; goto unlock;
sqp->s_flags |= HFI1_S_BUSY; sqp->s_flags |= RVT_S_BUSY;
again: again:
if (sqp->s_last == sqp->s_head) if (sqp->s_last == sqp->s_head)
...@@ -550,7 +550,7 @@ static void ruc_loopback(struct rvt_qp *sqp) ...@@ -550,7 +550,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
if (release) if (release)
hfi1_put_ss(&qp->r_sge); hfi1_put_ss(&qp->r_sge);
if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
goto send_comp; goto send_comp;
if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
...@@ -595,7 +595,7 @@ static void ruc_loopback(struct rvt_qp *sqp) ...@@ -595,7 +595,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
spin_lock_irqsave(&sqp->s_lock, flags); spin_lock_irqsave(&sqp->s_lock, flags);
if (!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_RECV_OK)) if (!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_RECV_OK))
goto clr_busy; goto clr_busy;
sqp->s_flags |= HFI1_S_WAIT_RNR; sqp->s_flags |= RVT_S_WAIT_RNR;
sqp->s_timer.function = hfi1_rc_rnr_retry; sqp->s_timer.function = hfi1_rc_rnr_retry;
sqp->s_timer.expires = jiffies + sqp->s_timer.expires = jiffies +
usecs_to_jiffies(ib_hfi1_rnr_table[qp->r_min_rnr_timer]); usecs_to_jiffies(ib_hfi1_rnr_table[qp->r_min_rnr_timer]);
...@@ -625,7 +625,7 @@ static void ruc_loopback(struct rvt_qp *sqp) ...@@ -625,7 +625,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
if (sqp->ibqp.qp_type == IB_QPT_RC) { if (sqp->ibqp.qp_type == IB_QPT_RC) {
int lastwqe = hfi1_error_qp(sqp, IB_WC_WR_FLUSH_ERR); int lastwqe = hfi1_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
sqp->s_flags &= ~HFI1_S_BUSY; sqp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags); spin_unlock_irqrestore(&sqp->s_lock, flags);
if (lastwqe) { if (lastwqe) {
struct ib_event ev; struct ib_event ev;
...@@ -638,7 +638,7 @@ static void ruc_loopback(struct rvt_qp *sqp) ...@@ -638,7 +638,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
goto done; goto done;
} }
clr_busy: clr_busy:
sqp->s_flags &= ~HFI1_S_BUSY; sqp->s_flags &= ~RVT_S_BUSY;
unlock: unlock:
spin_unlock_irqrestore(&sqp->s_lock, flags); spin_unlock_irqrestore(&sqp->s_lock, flags);
done: done:
...@@ -694,9 +694,9 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr, ...@@ -694,9 +694,9 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
static inline void build_ahg(struct rvt_qp *qp, u32 npsn) static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{ {
struct hfi1_qp_priv *priv = qp->priv; struct hfi1_qp_priv *priv = qp->priv;
if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR)) if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
clear_ahg(qp); clear_ahg(qp);
if (!(qp->s_flags & HFI1_S_AHG_VALID)) { if (!(qp->s_flags & RVT_S_AHG_VALID)) {
/* first middle that needs copy */ /* first middle that needs copy */
if (qp->s_ahgidx < 0) if (qp->s_ahgidx < 0)
qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde); qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
...@@ -706,7 +706,7 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn) ...@@ -706,7 +706,7 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
/* save to protect a change in another thread */ /* save to protect a change in another thread */
priv->s_hdr->sde = priv->s_sde; priv->s_hdr->sde = priv->s_sde;
priv->s_hdr->ahgidx = qp->s_ahgidx; priv->s_hdr->ahgidx = qp->s_ahgidx;
qp->s_flags |= HFI1_S_AHG_VALID; qp->s_flags |= RVT_S_AHG_VALID;
} }
} else { } else {
/* subsequent middle after valid */ /* subsequent middle after valid */
...@@ -779,7 +779,7 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, ...@@ -779,7 +779,7 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
if (middle) if (middle)
build_ahg(qp, bth2); build_ahg(qp, bth2);
else else
qp->s_flags &= ~HFI1_S_AHG_VALID; qp->s_flags &= ~RVT_S_AHG_VALID;
priv->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0); priv->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
priv->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); priv->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
priv->s_hdr->ibh.lrh[2] = priv->s_hdr->ibh.lrh[2] =
...@@ -790,8 +790,8 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr, ...@@ -790,8 +790,8 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
bth0 |= extra_bytes << 20; bth0 |= extra_bytes << 20;
ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[0] = cpu_to_be32(bth0);
bth1 = qp->remote_qpn; bth1 = qp->remote_qpn;
if (qp->s_flags & HFI1_S_ECN) { if (qp->s_flags & RVT_S_ECN) {
qp->s_flags &= ~HFI1_S_ECN; qp->s_flags &= ~RVT_S_ECN;
/* we recently received a FECN, so return a BECN */ /* we recently received a FECN, so return a BECN */
bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT); bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
} }
...@@ -847,7 +847,7 @@ void hfi1_do_send(struct work_struct *work) ...@@ -847,7 +847,7 @@ void hfi1_do_send(struct work_struct *work)
return; return;
} }
qp->s_flags |= HFI1_S_BUSY; qp->s_flags |= RVT_S_BUSY;
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
...@@ -897,7 +897,7 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, ...@@ -897,7 +897,7 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
/* See ch. 11.2.4.1 and 10.7.3.1 */ /* See ch. 11.2.4.1 and 10.7.3.1 */
if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) || if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED) || (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
status != IB_WC_SUCCESS) { status != IB_WC_SUCCESS) {
struct ib_wc wc; struct ib_wc wc;
......
...@@ -84,7 +84,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp) ...@@ -84,7 +84,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp)
goto bail; goto bail;
/* If DMAs are in progress, we can't flush immediately. */ /* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_iowait.sdma_busy)) { if (atomic_read(&priv->s_iowait.sdma_busy)) {
qp->s_flags |= HFI1_S_WAIT_DMA; qp->s_flags |= RVT_S_WAIT_DMA;
goto bail; goto bail;
} }
clear_ahg(qp); clear_ahg(qp);
...@@ -241,7 +241,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp) ...@@ -241,7 +241,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp)
goto unlock; goto unlock;
bail: bail:
qp->s_flags &= ~HFI1_S_BUSY; qp->s_flags &= ~RVT_S_BUSY;
unlock: unlock:
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
return ret; return ret;
...@@ -332,7 +332,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) ...@@ -332,7 +332,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
inv: inv:
if (qp->r_state == OP(SEND_FIRST) || if (qp->r_state == OP(SEND_FIRST) ||
qp->r_state == OP(SEND_MIDDLE)) { qp->r_state == OP(SEND_MIDDLE)) {
set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags); set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0; qp->r_sge.num_sge = 0;
} else } else
hfi1_put_ss(&qp->r_sge); hfi1_put_ss(&qp->r_sge);
...@@ -382,7 +382,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) ...@@ -382,7 +382,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
goto inv; goto inv;
} }
if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST)) if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
qp_comm_est(qp); qp_comm_est(qp);
/* OK, process the packet. */ /* OK, process the packet. */
...@@ -391,7 +391,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) ...@@ -391,7 +391,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
case OP(SEND_ONLY): case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE): case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first: send_first:
if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags)) if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
qp->r_sge = qp->s_rdma_read_sge; qp->r_sge = qp->s_rdma_read_sge;
else { else {
ret = hfi1_get_rwqe(qp, 0); ret = hfi1_get_rwqe(qp, 0);
...@@ -536,7 +536,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) ...@@ -536,7 +536,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
tlen -= (hdrsize + pad + 4); tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop; goto drop;
if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags)) if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
hfi1_put_ss(&qp->s_rdma_read_sge); hfi1_put_ss(&qp->s_rdma_read_sge);
else { else {
ret = hfi1_get_rwqe(qp, 1); ret = hfi1_get_rwqe(qp, 1);
...@@ -576,7 +576,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) ...@@ -576,7 +576,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
return; return;
rewind: rewind:
set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags); set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0; qp->r_sge.num_sge = 0;
drop: drop:
ibp->rvp.n_pkt_drops++; ibp->rvp.n_pkt_drops++;
......
...@@ -161,8 +161,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) ...@@ -161,8 +161,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
/* /*
* Get the next work request entry to find where to put the data. * Get the next work request entry to find where to put the data.
*/ */
if (qp->r_flags & HFI1_R_REUSE_SGE) if (qp->r_flags & RVT_R_REUSE_SGE)
qp->r_flags &= ~HFI1_R_REUSE_SGE; qp->r_flags &= ~RVT_R_REUSE_SGE;
else { else {
int ret; int ret;
...@@ -179,7 +179,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) ...@@ -179,7 +179,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
} }
/* Silently drop packets which are too big. */ /* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) { if (unlikely(wc.byte_len > qp->r_len)) {
qp->r_flags |= HFI1_R_REUSE_SGE; qp->r_flags |= RVT_R_REUSE_SGE;
ibp->rvp.n_pkt_drops++; ibp->rvp.n_pkt_drops++;
goto bail_unlock; goto bail_unlock;
} }
...@@ -223,7 +223,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) ...@@ -223,7 +223,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
length -= len; length -= len;
} }
hfi1_put_ss(&qp->r_sge); hfi1_put_ss(&qp->r_sge);
if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
goto bail_unlock; goto bail_unlock;
wc.wr_id = qp->r_wr_id; wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS; wc.status = IB_WC_SUCCESS;
...@@ -290,7 +290,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) ...@@ -290,7 +290,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp)
goto bail; goto bail;
/* If DMAs are in progress, we can't flush immediately. */ /* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_iowait.sdma_busy)) { if (atomic_read(&priv->s_iowait.sdma_busy)) {
qp->s_flags |= HFI1_S_WAIT_DMA; qp->s_flags |= RVT_S_WAIT_DMA;
goto bail; goto bail;
} }
wqe = get_swqe_ptr(qp, qp->s_last); wqe = get_swqe_ptr(qp, qp->s_last);
...@@ -324,7 +324,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) ...@@ -324,7 +324,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp)
* zero length descriptor so we get a callback. * zero length descriptor so we get a callback.
*/ */
if (atomic_read(&priv->s_iowait.sdma_busy)) { if (atomic_read(&priv->s_iowait.sdma_busy)) {
qp->s_flags |= HFI1_S_WAIT_DMA; qp->s_flags |= RVT_S_WAIT_DMA;
goto bail; goto bail;
} }
qp->s_cur = next_cur; qp->s_cur = next_cur;
...@@ -426,7 +426,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp) ...@@ -426,7 +426,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp)
goto unlock; goto unlock;
bail: bail:
qp->s_flags &= ~HFI1_S_BUSY; qp->s_flags &= ~RVT_S_BUSY;
unlock: unlock:
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
return ret; return ret;
...@@ -812,8 +812,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) ...@@ -812,8 +812,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
/* /*
* Get the next work request entry to find where to put the data. * Get the next work request entry to find where to put the data.
*/ */
if (qp->r_flags & HFI1_R_REUSE_SGE) if (qp->r_flags & RVT_R_REUSE_SGE)
qp->r_flags &= ~HFI1_R_REUSE_SGE; qp->r_flags &= ~RVT_R_REUSE_SGE;
else { else {
int ret; int ret;
...@@ -830,7 +830,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) ...@@ -830,7 +830,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
} }
/* Silently drop packets which are too big. */ /* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) { if (unlikely(wc.byte_len > qp->r_len)) {
qp->r_flags |= HFI1_R_REUSE_SGE; qp->r_flags |= RVT_R_REUSE_SGE;
goto drop; goto drop;
} }
if (has_grh) { if (has_grh) {
...@@ -841,7 +841,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) ...@@ -841,7 +841,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
hfi1_put_ss(&qp->r_sge); hfi1_put_ss(&qp->r_sge);
if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return; return;
wc.wr_id = qp->r_wr_id; wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS; wc.status = IB_WC_SUCCESS;
......
...@@ -702,7 +702,7 @@ static void mem_timer(unsigned long data) ...@@ -702,7 +702,7 @@ static void mem_timer(unsigned long data)
write_sequnlock_irqrestore(&dev->iowait_lock, flags); write_sequnlock_irqrestore(&dev->iowait_lock, flags);
if (qp) if (qp)
hfi1_qp_wakeup(qp, HFI1_S_WAIT_KMEM); hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
} }
void update_sge(struct rvt_sge_state *ss, u32 length) void update_sge(struct rvt_sge_state *ss, u32 length)
...@@ -740,12 +740,12 @@ static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, ...@@ -740,12 +740,12 @@ static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK && if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK &&
list_empty(&priv->s_iowait.list)) { list_empty(&priv->s_iowait.list)) {
dev->n_txwait++; dev->n_txwait++;
qp->s_flags |= HFI1_S_WAIT_TX; qp->s_flags |= RVT_S_WAIT_TX;
list_add_tail(&priv->s_iowait.list, &dev->txwait); list_add_tail(&priv->s_iowait.list, &dev->txwait);
trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TX); trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
atomic_inc(&qp->refcount); atomic_inc(&qp->refcount);
} }
qp->s_flags &= ~HFI1_S_BUSY; qp->s_flags &= ~RVT_S_BUSY;
write_sequnlock(&dev->iowait_lock); write_sequnlock(&dev->iowait_lock);
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
tx = ERR_PTR(-EBUSY); tx = ERR_PTR(-EBUSY);
...@@ -803,7 +803,7 @@ void hfi1_put_txreq(struct verbs_txreq *tx) ...@@ -803,7 +803,7 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
list_del_init(&priv->s_iowait.list); list_del_init(&priv->s_iowait.list);
/* refcount held until actual wake up */ /* refcount held until actual wake up */
write_sequnlock_irqrestore(&dev->iowait_lock, flags); write_sequnlock_irqrestore(&dev->iowait_lock, flags);
hfi1_qp_wakeup(qp, HFI1_S_WAIT_TX); hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
break; break;
} }
} while (read_seqretry(&dev->iowait_lock, seq)); } while (read_seqretry(&dev->iowait_lock, seq));
...@@ -838,8 +838,8 @@ static void verbs_sdma_complete( ...@@ -838,8 +838,8 @@ static void verbs_sdma_complete(
* do the flush work until that QP's * do the flush work until that QP's
* sdma work has finished. * sdma work has finished.
*/ */
if (qp->s_flags & HFI1_S_WAIT_DMA) { if (qp->s_flags & RVT_S_WAIT_DMA) {
qp->s_flags &= ~HFI1_S_WAIT_DMA; qp->s_flags &= ~RVT_S_WAIT_DMA;
hfi1_schedule_send(qp); hfi1_schedule_send(qp);
} }
} }
...@@ -860,13 +860,13 @@ static int wait_kmem(struct hfi1_ibdev *dev, struct rvt_qp *qp) ...@@ -860,13 +860,13 @@ static int wait_kmem(struct hfi1_ibdev *dev, struct rvt_qp *qp)
if (list_empty(&priv->s_iowait.list)) { if (list_empty(&priv->s_iowait.list)) {
if (list_empty(&dev->memwait)) if (list_empty(&dev->memwait))
mod_timer(&dev->mem_timer, jiffies + 1); mod_timer(&dev->mem_timer, jiffies + 1);
qp->s_flags |= HFI1_S_WAIT_KMEM; qp->s_flags |= RVT_S_WAIT_KMEM;
list_add_tail(&priv->s_iowait.list, &dev->memwait); list_add_tail(&priv->s_iowait.list, &dev->memwait);
trace_hfi1_qpsleep(qp, HFI1_S_WAIT_KMEM); trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
atomic_inc(&qp->refcount); atomic_inc(&qp->refcount);
} }
write_sequnlock(&dev->iowait_lock); write_sequnlock(&dev->iowait_lock);
qp->s_flags &= ~HFI1_S_BUSY; qp->s_flags &= ~RVT_S_BUSY;
ret = -EBUSY; ret = -EBUSY;
} }
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
...@@ -1092,17 +1092,17 @@ static int no_bufs_available(struct rvt_qp *qp, struct send_context *sc) ...@@ -1092,17 +1092,17 @@ static int no_bufs_available(struct rvt_qp *qp, struct send_context *sc)
int was_empty; int was_empty;
dev->n_piowait++; dev->n_piowait++;
qp->s_flags |= HFI1_S_WAIT_PIO; qp->s_flags |= RVT_S_WAIT_PIO;
was_empty = list_empty(&sc->piowait); was_empty = list_empty(&sc->piowait);
list_add_tail(&priv->s_iowait.list, &sc->piowait); list_add_tail(&priv->s_iowait.list, &sc->piowait);
trace_hfi1_qpsleep(qp, HFI1_S_WAIT_PIO); trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
atomic_inc(&qp->refcount); atomic_inc(&qp->refcount);
/* counting: only call wantpiobuf_intr if first user */ /* counting: only call wantpiobuf_intr if first user */
if (was_empty) if (was_empty)
hfi1_sc_wantpiobuf_intr(sc, 1); hfi1_sc_wantpiobuf_intr(sc, 1);
} }
write_sequnlock(&dev->iowait_lock); write_sequnlock(&dev->iowait_lock);
qp->s_flags &= ~HFI1_S_BUSY; qp->s_flags &= ~RVT_S_BUSY;
ret = -EBUSY; ret = -EBUSY;
} }
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
...@@ -1307,7 +1307,7 @@ static inline int egress_pkey_check(struct hfi1_pportdata *ppd, ...@@ -1307,7 +1307,7 @@ static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
* @ps: the state of the packet to send * @ps: the state of the packet to send
* *
* Return zero if packet is sent or queued OK. * Return zero if packet is sent or queued OK.
* Return non-zero and clear qp->s_flags HFI1_S_BUSY otherwise. * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
*/ */
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps) int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{ {
......
...@@ -63,6 +63,7 @@ ...@@ -63,6 +63,7 @@
#include <rdma/ib_user_verbs.h> #include <rdma/ib_user_verbs.h>
#include <rdma/ib_mad.h> #include <rdma/ib_mad.h>
#include <rdma/rdma_vt.h> #include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
struct hfi1_ctxtdata; struct hfi1_ctxtdata;
struct hfi1_pportdata; struct hfi1_pportdata;
...@@ -286,84 +287,6 @@ struct hfi1_pkt_state { ...@@ -286,84 +287,6 @@ struct hfi1_pkt_state {
struct hfi1_pportdata *ppd; struct hfi1_pportdata *ppd;
}; };
/*
* Atomic bit definitions for r_aflags.
*/
#define HFI1_R_WRID_VALID 0
#define HFI1_R_REWIND_SGE 1
/*
* Bit definitions for r_flags.
*/
#define HFI1_R_REUSE_SGE 0x01
#define HFI1_R_RDMAR_SEQ 0x02
/* defer ack until end of interrupt session */
#define HFI1_R_RSP_DEFERED_ACK 0x04
/* relay ack to send engine */
#define HFI1_R_RSP_SEND 0x08
#define HFI1_R_COMM_EST 0x10
/*
* Bit definitions for s_flags.
*
* HFI1_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
* HFI1_S_BUSY - send tasklet is processing the QP
* HFI1_S_TIMER - the RC retry timer is active
* HFI1_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
* HFI1_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
* before processing the next SWQE
* HFI1_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
* before processing the next SWQE
* HFI1_S_WAIT_RNR - waiting for RNR timeout
* HFI1_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
* HFI1_S_WAIT_DMA - waiting for send DMA queue to drain before generating
* next send completion entry not via send DMA
* HFI1_S_WAIT_PIO - waiting for a send buffer to be available
* HFI1_S_WAIT_TX - waiting for a struct verbs_txreq to be available
* HFI1_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
* HFI1_S_WAIT_KMEM - waiting for kernel memory to be available
* HFI1_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
* HFI1_S_WAIT_ACK - waiting for an ACK packet before sending more requests
* HFI1_S_SEND_ONE - send one packet, request ACK, then wait for ACK
* HFI1_S_ECN - a BECN was queued to the send engine
*/
#define HFI1_S_SIGNAL_REQ_WR 0x0001
#define HFI1_S_BUSY 0x0002
#define HFI1_S_TIMER 0x0004
#define HFI1_S_RESP_PENDING 0x0008
#define HFI1_S_ACK_PENDING 0x0010
#define HFI1_S_WAIT_FENCE 0x0020
#define HFI1_S_WAIT_RDMAR 0x0040
#define HFI1_S_WAIT_RNR 0x0080
#define HFI1_S_WAIT_SSN_CREDIT 0x0100
#define HFI1_S_WAIT_DMA 0x0200
#define HFI1_S_WAIT_PIO 0x0400
#define HFI1_S_WAIT_TX 0x0800
#define HFI1_S_WAIT_DMA_DESC 0x1000
#define HFI1_S_WAIT_KMEM 0x2000
#define HFI1_S_WAIT_PSN 0x4000
#define HFI1_S_WAIT_ACK 0x8000
#define HFI1_S_SEND_ONE 0x10000
#define HFI1_S_UNLIMITED_CREDIT 0x20000
#define HFI1_S_AHG_VALID 0x40000
#define HFI1_S_AHG_CLEAR 0x80000
#define HFI1_S_ECN 0x100000
/*
* Wait flags that would prevent any packet type from being sent.
*/
#define HFI1_S_ANY_WAIT_IO (HFI1_S_WAIT_PIO | HFI1_S_WAIT_TX | \
HFI1_S_WAIT_DMA_DESC | HFI1_S_WAIT_KMEM)
/*
* Wait flags that would prevent send work requests from making progress.
*/
#define HFI1_S_ANY_WAIT_SEND (HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR | \
HFI1_S_WAIT_RNR | HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_DMA | \
HFI1_S_WAIT_PSN | HFI1_S_WAIT_ACK)
#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | HFI1_S_ANY_WAIT_SEND)
#define HFI1_PSN_CREDIT 16 #define HFI1_PSN_CREDIT 16
/* /*
...@@ -507,9 +430,9 @@ static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait) ...@@ -507,9 +430,9 @@ static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
*/ */
static inline int hfi1_send_ok(struct rvt_qp *qp) static inline int hfi1_send_ok(struct rvt_qp *qp)
{ {
return !(qp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT_IO)) && return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
(qp->s_hdrwords || (qp->s_flags & HFI1_S_RESP_PENDING) || (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) ||
!(qp->s_flags & HFI1_S_ANY_WAIT_SEND)); !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment