Commit 6e38fca6 authored by Kaike Wan's avatar Kaike Wan Committed by Doug Ledford

IB/hfi1: Resend the TID RDMA WRITE DATA packets

This patch adds the logic to resend TID RDMA WRITE DATA packets.
The tracking indices will be reset properly so that the correct
TID entries will be used.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 7cf0ad67
...@@ -3059,8 +3059,9 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...@@ -3059,8 +3059,9 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
{ {
struct tid_rdma_request *req = wqe_to_tid_req(wqe); struct tid_rdma_request *req = wqe_to_tid_req(wqe);
struct tid_rdma_flow *flow; struct tid_rdma_flow *flow;
int diff; struct hfi1_qp_priv *qpriv = qp->priv;
u32 tididx = 0; int diff, delta_pkts;
u32 tididx = 0, i;
u16 fidx; u16 fidx;
if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) { if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
...@@ -3076,11 +3077,20 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...@@ -3076,11 +3077,20 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
return; return;
} }
} else { } else {
return; fidx = req->acked_tail;
flow = &req->flows[fidx];
*bth2 = mask_psn(req->r_ack_psn);
} }
if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn);
else
delta_pkts = delta_psn(*bth2,
full_flow_psn(flow,
flow->flow_state.spsn));
trace_hfi1_tid_flow_restart_req(qp, fidx, flow); trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
diff = delta_psn(*bth2, flow->flow_state.ib_spsn); diff = delta_pkts + flow->resync_npkts;
flow->sent = 0; flow->sent = 0;
flow->pkt = 0; flow->pkt = 0;
...@@ -3104,6 +3114,18 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...@@ -3104,6 +3114,18 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
break; break;
} }
} }
if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) +
flow->sent, 0);
/*
* Packet PSN is based on flow_state.spsn + flow->pkt. However,
* during a RESYNC, the generation is incremented and the
* sequence is reset to 0. Since we've adjusted the npkts in the
* flow and the SGE has been sufficiently advanced, we have to
* adjust flow->pkt in order to calculate the correct PSN.
*/
flow->pkt -= flow->resync_npkts;
}
if (flow->tid_offset == if (flow->tid_offset ==
EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) { EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) {
...@@ -3111,13 +3133,42 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...@@ -3111,13 +3133,42 @@ void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
flow->tid_offset = 0; flow->tid_offset = 0;
} }
flow->tid_idx = tididx; flow->tid_idx = tididx;
if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
/* Move flow_idx to correct index */ /* Move flow_idx to correct index */
req->flow_idx = fidx; req->flow_idx = fidx;
else
req->clear_tail = fidx;
trace_hfi1_tid_flow_restart_req(qp, fidx, flow); trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn, trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn,
wqe->lpsn, req); wqe->lpsn, req);
req->state = TID_REQUEST_ACTIVE; req->state = TID_REQUEST_ACTIVE;
if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
/* Reset all the flows that we are going to resend */
fidx = CIRC_NEXT(fidx, MAX_FLOWS);
i = qpriv->s_tid_tail;
do {
for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS);
fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
req->flows[fidx].sent = 0;
req->flows[fidx].pkt = 0;
req->flows[fidx].tid_idx = 0;
req->flows[fidx].tid_offset = 0;
req->flows[fidx].resync_npkts = 0;
}
if (i == qpriv->s_tid_cur)
break;
do {
i = (++i == qp->s_size ? 0 : i);
wqe = rvt_get_swqe_ptr(qp, i);
} while (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE);
req = wqe_to_tid_req(wqe);
req->cur_seg = req->ack_seg;
fidx = req->acked_tail;
/* Pull req->clear_tail back */
req->clear_tail = fidx;
} while (1);
}
} }
void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp) void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp)
......
...@@ -171,6 +171,7 @@ struct hfi1_qp_priv { ...@@ -171,6 +171,7 @@ struct hfi1_qp_priv {
struct tid_rdma_qp_params tid_rdma; struct tid_rdma_qp_params tid_rdma;
struct rvt_qp *owner; struct rvt_qp *owner;
u8 hdr_type; /* 9B or 16B */ u8 hdr_type; /* 9B or 16B */
struct rvt_sge_state tid_ss; /* SGE state pointer for 2nd leg */
atomic_t n_tid_requests; /* # of sent TID RDMA requests */ atomic_t n_tid_requests; /* # of sent TID RDMA requests */
unsigned long tid_timer_timeout_jiffies; unsigned long tid_timer_timeout_jiffies;
unsigned long tid_retry_timeout_jiffies; unsigned long tid_retry_timeout_jiffies;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment