Commit 116aa033 authored by Venkata Sandeep Dhanalakota, committed by Jason Gunthorpe

IB/{hfi1, qib, rdmavt}: Move send completion logic to rdmavt

Move the send completion code into rdmavt so that the qib and hfi1
drivers share this logic.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Venkata Sandeep Dhanalakota <venkata.s.dhanalakota@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 019f118b
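
In outline: each driver's private helper (hfi1_send_complete()/qib_send_complete()) is deleted, a shared rvt_send_complete() is added to rdmavt, and a new wc_opcode table on struct rvt_dev_info lets the shared code translate each driver's posted work-request opcode into a completion opcode. A minimal sketch of the resulting driver-side wiring, distilled from the hfi1 hunks below (not a complete registration path):

    /* At device registration (hfi1_register_ib_device() hunk): hand
     * rdmavt the driver's opcode translation table alongside the
     * existing post send table.
     */
    dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
    dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode;

    /* At every former hfi1_send_complete() call site: retire the send
     * WQE through the shared rdmavt helper, still under s_lock.
     */
    spin_lock_irqsave(&qp->s_lock, flags);
    rvt_send_complete(qp, qp->s_wqe, wc_status);
    spin_unlock_irqrestore(&qp->s_lock, flags);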
@@ -309,7 +309,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
             }
             clear_ahg(qp);
             wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-            hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
-                               IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
+            rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+                              IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
             /* will get called again */
             goto done_free_tx;
@@ -378,7 +378,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                         wqe->wr.ex.invalidate_rkey);
                 local_ops = 1;
             }
-            hfi1_send_complete(qp, wqe,
-                               err ? IB_WC_LOC_PROT_ERR
-                                   : IB_WC_SUCCESS);
+            rvt_send_complete(qp, wqe,
+                              err ? IB_WC_LOC_PROT_ERR
+                                  : IB_WC_SUCCESS);
             if (local_ops)
@@ -1043,7 +1043,7 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
         hfi1_migrate_qp(qp);
         qp->s_retry = qp->s_retry_cnt;
     } else if (qp->s_last == qp->s_acked) {
-        hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+        rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
         rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
         return;
     } else { /* need to handle delayed completion */
@@ -1468,7 +1468,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
             ibp->rvp.n_other_naks++;
 class_b:
             if (qp->s_last == qp->s_acked) {
-                hfi1_send_complete(qp, wqe, status);
+                rvt_send_complete(qp, wqe, status);
                 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
             }
             break;
@@ -1706,7 +1706,7 @@ static void rc_rcv_resp(struct hfi1_packet *packet)
         status = IB_WC_LOC_LEN_ERR;
 ack_err:
     if (qp->s_last == qp->s_acked) {
-        hfi1_send_complete(qp, wqe, status);
+        rvt_send_complete(qp, wqe, status);
         rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
     }
 ack_done:
...
@@ -411,7 +411,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
     ibp->rvp.n_loop_pkts++;
 flush_send:
     sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
-    hfi1_send_complete(sqp, wqe, send_status);
+    rvt_send_complete(sqp, wqe, send_status);
     if (local_ops) {
         atomic_dec(&sqp->local_ops_pending);
         local_ops = 0;
@@ -459,7 +459,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
 serr:
     spin_lock_irqsave(&sqp->s_lock, flags);
-    hfi1_send_complete(sqp, wqe, send_status);
+    rvt_send_complete(sqp, wqe, send_status);
     if (sqp->ibqp.qp_type == IB_QPT_RC) {
         int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
@@ -922,44 +922,3 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
     iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
     spin_unlock_irqrestore(&qp->s_lock, ps.flags);
 }
-
-/*
- * This should be called with s_lock held.
- */
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
-                        enum ib_wc_status status)
-{
-    u32 old_last, last;
-
-    if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
-        return;
-
-    last = qp->s_last;
-    old_last = last;
-    trace_hfi1_qp_send_completion(qp, wqe, last);
-    if (++last >= qp->s_size)
-        last = 0;
-    trace_hfi1_qp_send_completion(qp, wqe, last);
-    qp->s_last = last;
-    /* See post_send() */
-    barrier();
-    rvt_put_swqe(wqe);
-    if (qp->ibqp.qp_type == IB_QPT_UD ||
-        qp->ibqp.qp_type == IB_QPT_SMI ||
-        qp->ibqp.qp_type == IB_QPT_GSI)
-        atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
-    rvt_qp_swqe_complete(qp,
-                         wqe,
-                         ib_hfi1_wc_opcode[wqe->wr.opcode],
-                         status);
-
-    if (qp->s_acked == old_last)
-        qp->s_acked = last;
-    if (qp->s_cur == old_last)
-        qp->s_cur = last;
-    if (qp->s_tail == old_last)
-        qp->s_tail = last;
-    if (qp->state == IB_QPS_SQD && last == qp->s_cur)
-        qp->s_draining = 0;
-}
@@ -88,7 +88,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
             }
             clear_ahg(qp);
             wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-            hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+            rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
             goto done_free_tx;
         }
@@ -140,7 +140,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                         qp, wqe->wr.ex.invalidate_rkey);
                 local_ops = 1;
             }
-            hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
-                                            : IB_WC_SUCCESS);
+            rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
+                                           : IB_WC_SUCCESS);
             if (local_ops)
                 atomic_dec(&qp->local_ops_pending);
...
@@ -518,7 +518,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
             goto bail;
         }
         wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-        hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+        rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
         goto done_free_tx;
     }
@@ -560,7 +560,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
             ud_loopback(qp, wqe);
             spin_lock_irqsave(&qp->s_lock, tflags);
             ps->flags = tflags;
-            hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
+            rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
             goto done_free_tx;
         }
     }
...
@@ -492,7 +492,7 @@ static void verbs_sdma_complete(
     spin_lock(&qp->s_lock);
     if (tx->wqe) {
-        hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+        rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
     } else if (qp->ibqp.qp_type == IB_QPT_RC) {
         struct hfi1_opa_header *hdr;
@@ -938,7 +938,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 pio_bail:
     if (qp->s_wqe) {
         spin_lock_irqsave(&qp->s_lock, flags);
-        hfi1_send_complete(qp, qp->s_wqe, wc_status);
+        rvt_send_complete(qp, qp->s_wqe, wc_status);
         spin_unlock_irqrestore(&qp->s_lock, flags);
     } else if (qp->ibqp.qp_type == IB_QPT_RC) {
         spin_lock_irqsave(&qp->s_lock, flags);
@@ -1145,7 +1145,7 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
             hfi1_cdbg(PIO, "%s() Failed. Completing with err",
                       __func__);
             spin_lock_irqsave(&qp->s_lock, flags);
-            hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+            rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
             spin_unlock_irqrestore(&qp->s_lock, flags);
         }
         return -EINVAL;
@@ -1735,6 +1735,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
     /* post send table */
     dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
 
+    /* opcode translation table */
+    dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode;
+
     ppd = dd->pport;
     for (i = 0; i < dd->num_pports; i++, ppd++)
         rvt_init_port(&dd->verbs_dev.rdi,
...
@@ -363,9 +363,6 @@ void hfi1_do_send_from_rvt(struct rvt_qp *qp);
 void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
 
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
-                        enum ib_wc_status status);
-
 void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn);
 
 int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
...
@@ -254,7 +254,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
             goto bail;
         }
         wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-        qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
-                          IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
+        rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+                          IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
         /* will get called again */
         goto done;
@@ -838,7 +838,7 @@ void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
         qib_migrate_qp(qp);
         qp->s_retry = qp->s_retry_cnt;
     } else if (qp->s_last == qp->s_acked) {
-        qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+        rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
         rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
         return;
     } else /* XXX need to handle delayed completion */
@@ -1221,7 +1221,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
             ibp->rvp.n_other_naks++;
 class_b:
             if (qp->s_last == qp->s_acked) {
-                qib_send_complete(qp, wqe, status);
+                rvt_send_complete(qp, wqe, status);
                 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
             }
             break;
@@ -1492,7 +1492,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
         status = IB_WC_LOC_LEN_ERR;
 ack_err:
     if (qp->s_last == qp->s_acked) {
-        qib_send_complete(qp, wqe, status);
+        rvt_send_complete(qp, wqe, status);
         rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
     }
 ack_done:
...
@@ -403,7 +403,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
     ibp->rvp.n_loop_pkts++;
 flush_send:
     sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
-    qib_send_complete(sqp, wqe, send_status);
+    rvt_send_complete(sqp, wqe, send_status);
     goto again;
 
 rnr_nak:
@@ -447,7 +447,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 serr:
     spin_lock_irqsave(&sqp->s_lock, flags);
-    qib_send_complete(sqp, wqe, send_status);
+    rvt_send_complete(sqp, wqe, send_status);
     if (sqp->ibqp.qp_type == IB_QPT_RC) {
         int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
@@ -613,42 +613,3 @@ void qib_do_send(struct rvt_qp *qp)
     spin_unlock_irqrestore(&qp->s_lock, flags);
 }
-
-/*
- * This should be called with s_lock held.
- */
-void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
-                       enum ib_wc_status status)
-{
-    u32 old_last, last;
-
-    if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
-        return;
-
-    last = qp->s_last;
-    old_last = last;
-    if (++last >= qp->s_size)
-        last = 0;
-    qp->s_last = last;
-    /* See post_send() */
-    barrier();
-    rvt_put_swqe(wqe);
-    if (qp->ibqp.qp_type == IB_QPT_UD ||
-        qp->ibqp.qp_type == IB_QPT_SMI ||
-        qp->ibqp.qp_type == IB_QPT_GSI)
-        atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
-    rvt_qp_swqe_complete(qp,
-                         wqe,
-                         ib_qib_wc_opcode[wqe->wr.opcode],
-                         status);
-
-    if (qp->s_acked == old_last)
-        qp->s_acked = last;
-    if (qp->s_cur == old_last)
-        qp->s_cur = last;
-    if (qp->s_tail == old_last)
-        qp->s_tail = last;
-    if (qp->state == IB_QPS_SQD && last == qp->s_cur)
-        qp->s_draining = 0;
-}
@@ -651,7 +651,7 @@ int qib_sdma_verbs_send(struct qib_pportdata *ppd,
         if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
             rvt_error_qp(qp, IB_WC_GENERAL_ERR);
     } else if (qp->s_wqe)
-        qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+        rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
     spin_unlock(&qp->s_lock);
     spin_unlock(&qp->r_lock);
     /* return zero to process the next send work request */
...
@@ -68,7 +68,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
             goto bail;
         }
         wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-        qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+        rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
         goto done;
     }
...
@@ -260,7 +260,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
             goto bail;
         }
         wqe = rvt_get_swqe_ptr(qp, qp->s_last);
-        qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+        rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
         goto done;
     }
@@ -304,7 +304,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
         qib_ud_loopback(qp, wqe);
         spin_lock_irqsave(&qp->s_lock, tflags);
         *flags = tflags;
-        qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+        rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
         goto done;
     }
 }
...
@@ -731,7 +731,7 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
     spin_lock(&qp->s_lock);
     if (tx->wqe)
-        qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+        rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
     else if (qp->ibqp.qp_type == IB_QPT_RC) {
         struct ib_header *hdr;
@@ -1004,7 +1004,7 @@ static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
     }
     if (qp->s_wqe) {
         spin_lock_irqsave(&qp->s_lock, flags);
-        qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+        rvt_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
         spin_unlock_irqrestore(&qp->s_lock, flags);
     } else if (qp->ibqp.qp_type == IB_QPT_RC) {
         spin_lock_irqsave(&qp->s_lock, flags);
@@ -1491,6 +1491,9 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
         rdi->dparms.props.max_mcast_grp;
     /* post send table */
     dd->verbs_dev.rdi.post_parms = qib_post_parms;
+
+    /* opcode translation table */
+    dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode;
 }
 
 /**
...
@@ -331,9 +331,6 @@ void _qib_do_send(struct work_struct *work);
 void qib_do_send(struct rvt_qp *qp);
 
-void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
-                       enum ib_wc_status status);
-
 void qib_send_rc_ack(struct rvt_qp *qp);
 
 int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);
...
@@ -2658,6 +2658,49 @@ void rvt_qp_iter(struct rvt_dev_info *rdi,
 }
 EXPORT_SYMBOL(rvt_qp_iter);
 
+/*
+ * This should be called with s_lock held.
+ */
+void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+                       enum ib_wc_status status)
+{
+    u32 old_last, last;
+    struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+    if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+        return;
+
+    last = qp->s_last;
+    old_last = last;
+    trace_rvt_qp_send_completion(qp, wqe, last);
+    if (++last >= qp->s_size)
+        last = 0;
+    trace_rvt_qp_send_completion(qp, wqe, last);
+    qp->s_last = last;
+    /* See post_send() */
+    barrier();
+    rvt_put_swqe(wqe);
+    if (qp->ibqp.qp_type == IB_QPT_UD ||
+        qp->ibqp.qp_type == IB_QPT_SMI ||
+        qp->ibqp.qp_type == IB_QPT_GSI)
+        atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+
+    rvt_qp_swqe_complete(qp,
+                         wqe,
+                         rdi->wc_opcode[wqe->wr.opcode],
+                         status);
+
+    if (qp->s_acked == old_last)
+        qp->s_acked = last;
+    if (qp->s_cur == old_last)
+        qp->s_cur = last;
+    if (qp->s_tail == old_last)
+        qp->s_tail = last;
+    if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+        qp->s_draining = 0;
+}
+EXPORT_SYMBOL(rvt_send_complete);
+
 /**
  * rvt_copy_sge - copy data to SGE memory
  * @qp: associated QP
...
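
rvt_send_complete() keeps the old helpers' contract: the caller must hold the QP's s_lock, and the function advances s_last past the completed slot, drops the SWQE reference, and generates the work completion through rdi->wc_opcode. A minimal sketch of a transmit-completion callback honoring that contract (example_tx_complete is a hypothetical name; the pattern mirrors the verbs_sdma_complete() and sdma_complete() hunks above):

    /* Hypothetical driver TX-completion callback. */
    static void example_tx_complete(struct rvt_qp *qp, struct rvt_swqe *wqe)
    {
        spin_lock(&qp->s_lock);
        if (wqe)
            /* Retires the WQE and posts the CQE in one step,
             * serialized against the send engine by s_lock.
             */
            rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
        spin_unlock(&qp->s_lock);
    }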
@@ -153,6 +153,48 @@ TRACE_EVENT(
     )
 );
 
+TRACE_EVENT(
+    rvt_qp_send_completion,
+    TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
+    TP_ARGS(qp, wqe, idx),
+    TP_STRUCT__entry(
+        RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+        __field(struct rvt_swqe *, wqe)
+        __field(u64, wr_id)
+        __field(u32, qpn)
+        __field(u32, qpt)
+        __field(u32, length)
+        __field(u32, idx)
+        __field(u32, ssn)
+        __field(enum ib_wr_opcode, opcode)
+        __field(int, send_flags)
+    ),
+    TP_fast_assign(
+        RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+        __entry->wqe = wqe;
+        __entry->wr_id = wqe->wr.wr_id;
+        __entry->qpn = qp->ibqp.qp_num;
+        __entry->qpt = qp->ibqp.qp_type;
+        __entry->length = wqe->length;
+        __entry->idx = idx;
+        __entry->ssn = wqe->ssn;
+        __entry->opcode = wqe->wr.opcode;
+        __entry->send_flags = wqe->wr.send_flags;
+    ),
+    TP_printk(
+        "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
+        __get_str(dev),
+        __entry->qpn,
+        __entry->qpt,
+        __entry->wqe,
+        __entry->idx,
+        __entry->wr_id,
+        __entry->length,
+        __entry->ssn,
+        __entry->opcode,
+        __entry->send_flags
+    )
+);
+
 #endif /* __RVT_TRACE_TX_H */
 
 #undef TRACE_INCLUDE_PATH
...
@@ -398,6 +398,9 @@ struct rvt_dev_info {
     /* post send table */
     const struct rvt_operation_params *post_parms;
 
+    /* opcode translation table */
+    const enum ib_wc_opcode *wc_opcode;
+
     /* Driver specific helper functions */
     struct rvt_driver_provided driver_f;
...
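
The new wc_opcode member is a per-driver array indexed by enum ib_wr_opcode; rvt_send_complete() reads rdi->wc_opcode[wqe->wr.opcode] to choose the completion opcode. The drivers already carry such tables (ib_hfi1_wc_opcode, ib_qib_wc_opcode); a sketch of the general shape with a few illustrative entries, not a driver's full table:

    /* Maps posted work-request opcodes to work-completion opcodes. */
    static const enum ib_wc_opcode example_wc_opcode[] = {
        [IB_WR_RDMA_WRITE]           = IB_WC_RDMA_WRITE,
        [IB_WR_SEND]                 = IB_WC_SEND,
        [IB_WR_RDMA_READ]            = IB_WC_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP]   = IB_WC_COMP_SWAP,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
    };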
@@ -681,6 +681,8 @@ void rvt_add_retry_timer(struct rvt_qp *qp);
 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
                   void *data, u32 length,
                   bool release, bool copy_last);
+void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+                       enum ib_wc_status status);
 
 /**
  * struct rvt_qp_iter - the iterator for QPs
...