Commit 02adbecf authored by Vijay Immanuel, committed by Greg Kroah-Hartman

IB/rxe: fixes for rdma read retry

[ Upstream commit 030e46e4 ]

When a read request is retried for the remaining partial
data, the response may restart from a read response first
or a read response only packet, so support those cases.
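
For illustration, a standalone user-space sketch of that acceptance rule
(stand-in opcode constants and a hypothetical helper, not the driver's
check_ack() itself): a retry that resumes mid-transfer answers with a read
response first packet at the wqe's first_psn, and a retry whose remainder
fits in a single packet answers with a read response only packet.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in opcode values, for illustration only. */
enum {
	READ_RESPONSE_FIRST,
	READ_RESPONSE_MIDDLE,
	READ_RESPONSE_LAST,
	READ_RESPONSE_ONLY,
};

/* True if this response packet may (re)start the read for the wqe. */
static bool read_retry_restart_ok(int opcode, uint32_t pkt_psn,
				  uint32_t first_psn, uint32_t last_psn)
{
	return (pkt_psn == first_psn && opcode == READ_RESPONSE_FIRST) ||
	       (first_psn == last_psn && opcode == READ_RESPONSE_ONLY);
}

int main(void)
{
	/* Retried read expecting PSNs 41..44: a "first" response at 41 restarts it. */
	printf("%d\n", read_retry_restart_ok(READ_RESPONSE_FIRST, 41, 41, 44)); /* 1 */
	/* Only one packet left (first_psn == last_psn): an "only" response restarts it. */
	printf("%d\n", read_retry_restart_ok(READ_RESPONSE_ONLY, 44, 44, 44));  /* 1 */
	return 0;
}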

Do not advance the comp psn beyond the current wqe's last_psn,
as that could skip over an entire read wqe and cause the
req_retry() logic to set an incorrect req psn.
An example sequence is as follows:
Write        PSN 40 -- this is the current WQE.
Read request PSN 41
Write        PSN 42
Receive ACK  PSN 42 -- this will complete the current WQE
for PSN 40, and set the comp psn to 42, which is a problem
because the read request at PSN 41 has been skipped over.
So when req_retry() tries to retransmit the read request,
it sets the req psn to 42, which is incorrect.
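
For illustration, a standalone user-space sketch of the fixed completion
step with the hypothetical PSN values above; psn_compare() follows the
driver's signed 24-bit window comparison.

#include <stdio.h>
#include <stdint.h>

#define BTH_PSN_MASK 0xffffff

/* Signed comparison over the 24-bit PSN window, as in the rxe driver. */
static int32_t psn_compare(uint32_t psn_a, uint32_t psn_b)
{
	return (int32_t)((psn_a - psn_b) << 8);
}

int main(void)
{
	uint32_t comp_psn = 40;      /* completer sits at the Write, PSN 40 */
	uint32_t wqe_last_psn = 40;  /* the current WQE's last_psn */

	/* Fixed complete_wqe(): advance comp.psn only past the current
	 * WQE's last_psn, so comp.psn lands on 41 (the skipped read)
	 * instead of jumping past it, and req_retry() resends the read.
	 */
	if (psn_compare(wqe_last_psn, comp_psn) >= 0)
		comp_psn = (wqe_last_psn + 1) & BTH_PSN_MASK;

	printf("comp.psn after completing the write = %u\n",
	       (unsigned int)comp_psn);  /* 41 */
	return 0;
}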

When retrying a read request, calculate the number of psns
completed based on the dma resid instead of the wqe first_psn.
The wqe first_psn could have moved if the read request was
retried multiple times.
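
A minimal sketch of that calculation with made-up numbers (8192-byte read,
4096 bytes still outstanding, 1024-byte MTU); the resid-based count stays
correct no matter how often the read has already been retried.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical read WQE: 8192 bytes total, 4096 still outstanding,
	 * 1024-byte path MTU.
	 */
	uint32_t dma_length = 8192;
	uint32_t dma_resid  = 4096;
	uint32_t mtu        = 1024;
	uint64_t iova       = 0x10000;

	/* Fixed req_retry(): psns already completed = bytes received / mtu,
	 * independent of how many times the read has been retried.
	 */
	uint32_t npsn = (dma_length - dma_resid) / mtu;   /* 4 */

	iova += (uint64_t)npsn * mtu;  /* restart the read at the first missing byte */

	printf("npsn = %u, retry iova = 0x%llx\n",
	       (unsigned int)npsn, (unsigned long long)iova);
	return 0;
}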

Set the reth length to the dma resid to handle read retries for
the remaining partial data.
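
Continuing the same made-up numbers, the retried read request then
advertises only the remaining bytes in its RETH (hypothetical struct for
illustration, not the driver's reth_set_len()).

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for the RDMA Extended Transport Header fields carried
 * by a read request (hypothetical struct, for illustration only).
 */
struct reth {
	uint64_t va;    /* remote virtual address to read from */
	uint32_t rkey;  /* remote key */
	uint32_t len;   /* number of bytes requested */
};

int main(void)
{
	uint32_t dma_length = 8192, dma_resid = 4096;
	uint64_t iova = 0x10000 + (dma_length - dma_resid); /* advanced by req_retry() */

	struct reth reth = {
		.va   = iova,
		.rkey = 0x1234,     /* made-up rkey */
		.len  = dma_resid,  /* was dma_length before the fix */
	};

	printf("retry read: va=0x%llx len=%u\n",
	       (unsigned long long)reth.va, (unsigned int)reth.len);
	return 0;
}
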
Signed-off-by: Vijay Immanuel <vijayi@attalasystems.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent cad88967
@@ -254,6 +254,17 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
 	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
 		if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
 		    pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
+			/* read retries of partial data may restart from
+			 * read response first or response only.
+			 */
+			if ((pkt->psn == wqe->first_psn &&
+			     pkt->opcode ==
+			     IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
+			    (wqe->first_psn == wqe->last_psn &&
+			     pkt->opcode ==
+			     IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
+				break;
+
 			return COMPST_ERROR;
 		}
 		break;
@@ -500,11 +511,11 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
 					   struct rxe_pkt_info *pkt,
 					   struct rxe_send_wqe *wqe)
 {
-	qp->comp.opcode = -1;
-
-	if (pkt) {
-		if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
-			qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+	if (pkt && wqe->state == wqe_state_pending) {
+		if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
+			qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
+			qp->comp.opcode = -1;
+		}
 
 		if (qp->req.wait_psn) {
 			qp->req.wait_psn = 0;
...
@@ -73,9 +73,6 @@ static void req_retry(struct rxe_qp *qp)
 	int npsn;
 	int first = 1;
 
-	wqe = queue_head(qp->sq.queue);
-	npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;
-
 	qp->req.wqe_index	= consumer_index(qp->sq.queue);
 	qp->req.psn		= qp->comp.psn;
 	qp->req.opcode		= -1;
@@ -107,11 +104,17 @@ static void req_retry(struct rxe_qp *qp)
 		if (first) {
 			first = 0;
 
-			if (mask & WR_WRITE_OR_SEND_MASK)
+			if (mask & WR_WRITE_OR_SEND_MASK) {
+				npsn = (qp->comp.psn - wqe->first_psn) &
+					BTH_PSN_MASK;
 				retry_first_write_send(qp, wqe, mask, npsn);
+			}
 
-			if (mask & WR_READ_MASK)
+			if (mask & WR_READ_MASK) {
+				npsn = (wqe->dma.length - wqe->dma.resid) /
+					qp->mtu;
 				wqe->iova += npsn * qp->mtu;
+			}
 		}
 
 		wqe->state = wqe_state_posted;
@@ -435,7 +438,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 	if (pkt->mask & RXE_RETH_MASK) {
 		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
 		reth_set_va(pkt, wqe->iova);
-		reth_set_len(pkt, wqe->dma.length);
+		reth_set_len(pkt, wqe->dma.resid);
 	}
 
 	if (pkt->mask & RXE_IMMDT_MASK)
...