Commit 220e8428 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Move atomic responder res to atomic_reply

Move the allocation of the atomic responder resource up into
rxe_atomic_reply() from send_atomic_ack(), in preparation for merging
the normal and retry atomic responder flows.

Link: https://lore.kernel.org/r/20220606143836.3323-4-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 0ed5493e
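The shape of the change is an allocate-once pattern: rxe_atomic_reply() now owns the responder resource, creating it only when none is pending and leaving qp->resp.res set until send_atomic_ack() has consumed it. Below is a minimal, compilable user-space sketch of that pattern; the struct and helper names echo the kernel's but are simplified stand-ins, not the real rxe definitions:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures; illustrative only. */
struct resp_res {
	int type;
	unsigned int first_psn, last_psn, cur_psn;
	int replay;
};

struct rxe_qp {
	struct resp_res res_pool[4];	/* ring of responder resources */
	int res_head;
	struct resp_res *res;		/* plays the role of qp->resp.res */
};

/* Mirrors rxe_prepare_atomic_res(): claim the next ring slot and
 * initialize it from the request PSN.
 */
static struct resp_res *prepare_atomic_res(struct rxe_qp *qp, unsigned int psn)
{
	struct resp_res *res = &qp->res_pool[qp->res_head];

	qp->res_head = (qp->res_head + 1) % 4;
	res->type = 1;			/* stands in for RXE_ATOMIC_MASK */
	res->first_psn = res->last_psn = res->cur_psn = psn;
	res->replay = 0;
	return res;
}

/* After this commit the reply path allocates lazily: only when no
 * resource is already pending on the qp.
 */
static void atomic_reply(struct rxe_qp *qp, unsigned int psn)
{
	if (!qp->res)
		qp->res = prepare_atomic_res(qp, psn);
	/* ... perform the atomic op and stage the result in qp->res ... */
}

/* The ack path just consumes the pending resource and clears it. */
static void send_atomic_ack(struct rxe_qp *qp)
{
	printf("acking psn %u\n", qp->res->cur_psn);
	qp->res = NULL;
}

int main(void)
{
	struct rxe_qp qp = { .res = NULL };

	atomic_reply(&qp, 100);	/* first pass allocates the resource */
	atomic_reply(&qp, 100);	/* a rerun finds and reuses it */
	send_atomic_ack(&qp);
	return 0;
}

Because the resource now persists on the qp until the ack goes out, a retried atomic request can find it again rather than allocating a fresh one, which is exactly what the planned merge of the normal and retry flows relies on.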
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -554,12 +554,36 @@ static enum resp_states write_data_in(struct rxe_qp *qp,
 /* Guarantee atomicity of atomic operations at the machine level. */
 static DEFINE_SPINLOCK(atomic_ops_lock);
 
+static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp,
+					       struct rxe_pkt_info *pkt)
+{
+	struct resp_res *res;
+
+	res = &qp->resp.resources[qp->resp.res_head];
+	rxe_advance_resp_resource(qp);
+	free_rd_atomic_resource(qp, res);
+
+	res->type = RXE_ATOMIC_MASK;
+	res->first_psn = pkt->psn;
+	res->last_psn = pkt->psn;
+	res->cur_psn = pkt->psn;
+	res->replay = 0;
+
+	return res;
+}
+
 static enum resp_states rxe_atomic_reply(struct rxe_qp *qp,
 					 struct rxe_pkt_info *pkt)
 {
 	u64 *vaddr;
 	enum resp_states ret;
 	struct rxe_mr *mr = qp->resp.mr;
+	struct resp_res *res = qp->resp.res;
+
+	if (!res) {
+		res = rxe_prepare_atomic_res(qp, pkt);
+		qp->resp.res = res;
+	}
 
 	if (mr->state != RXE_MR_STATE_VALID) {
 		ret = RESPST_ERR_RKEY_VIOLATION;
@@ -1026,32 +1050,15 @@ static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
 	return err;
 }
 
-static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
-{
-	struct resp_res *res;
-
-	res = &qp->resp.resources[qp->resp.res_head];
-	rxe_advance_resp_resource(qp);
-	free_rd_atomic_resource(qp, res);
-
-	res->type = RXE_ATOMIC_MASK;
-	res->first_psn = pkt->psn;
-	res->last_psn = pkt->psn;
-	res->cur_psn = pkt->psn;
-
-	return res;
-}
-
-static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
-			   u8 syndrome)
+static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
 {
 	int err = 0;
 	struct rxe_pkt_info ack_pkt;
 	struct sk_buff *skb;
-	struct resp_res *res;
+	struct resp_res *res = qp->resp.res;
 
 	skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
-				 0, pkt->psn, syndrome);
+				 0, psn, syndrome);
 	if (!skb) {
 		err = -ENOMEM;
 		goto out;
@@ -1059,7 +1066,6 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 
 	skb_get(skb);
 
-	res = rxe_prepare_atomic_res(qp, pkt);
 	res->atomic.skb = skb;
 
 	err = rxe_xmit_packet(qp, &ack_pkt, skb);
@@ -1067,6 +1073,11 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 		pr_err_ratelimited("Failed sending ack\n");
 		rxe_put(qp);
 	}
+
+	/* have to clear this since it is used to trigger
+	 * long read replies
+	 */
+	qp->resp.res = NULL;
 out:
 	return err;
 }
@@ -1080,7 +1091,7 @@ static enum resp_states acknowledge(struct rxe_qp *qp,
 	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
 		send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
 	else if (pkt->mask & RXE_ATOMIC_MASK)
-		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
+		send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
 	else if (bth_ack(pkt))
 		send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
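One subtlety, flagged by the new comment in send_atomic_ack() above: qp->resp.res doubles as the responder's "reply in progress" marker, which the read-reply path uses to continue a long (multi-packet) read response. If the atomic path left a stale pointer behind after queuing the ack, the state machine could mistake it for an outstanding read reply, hence the explicit qp->resp.res = NULL. A compilable toy sketch of that invariant; qp_resp and after_ack are illustrative names, not the kernel's:

#include <stdio.h>

/* Simplified stand-in: a non-NULL responder resource is read elsewhere
 * in the state machine as "a reply is still in progress".
 */
struct qp_resp { void *res; };

enum resp_states { RESPST_READ_REPLY, RESPST_CLEANUP };

static enum resp_states after_ack(const struct qp_resp *resp)
{
	return resp->res ? RESPST_READ_REPLY : RESPST_CLEANUP;
}

int main(void)
{
	struct qp_resp resp = { .res = 0 };	/* as cleared by send_atomic_ack() */

	printf("%s\n", after_ack(&resp) == RESPST_CLEANUP ?
	       "cleanup" : "spurious read reply");
	return 0;
}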