Commit 5fc24e60 authored by Jason Gunthorpe

RDMA/rxe: Fix compile warnings on 32-bit

Move the conditional code into a function, with two variants, so it is
harder to make these kinds of mistakes (a standalone sketch of the
pattern follows the build log below).

 drivers/infiniband/sw/rxe/rxe_resp.c: In function 'atomic_write_reply':
 drivers/infiniband/sw/rxe/rxe_resp.c:794:13: error: unused variable 'payload' [-Werror=unused-variable]
   794 |         int payload = payload_size(pkt);
       |             ^~~~~~~
 drivers/infiniband/sw/rxe/rxe_resp.c:793:24: error: unused variable 'mr' [-Werror=unused-variable]
   793 |         struct rxe_mr *mr = qp->resp.mr;
       |                        ^~
 drivers/infiniband/sw/rxe/rxe_resp.c:791:19: error: unused variable 'dst' [-Werror=unused-variable]
   791 |         u64 src, *dst;
       |                   ^~~
 drivers/infiniband/sw/rxe/rxe_resp.c:791:13: error: unused variable 'src' [-Werror=unused-variable]
   791 |         u64 src, *dst;
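
For illustration, a minimal self-contained sketch of the same two-variant
pattern (hypothetical names, not the rxe code; UINTPTR_MAX == UINT64_MAX
stands in for CONFIG_64BIT). Variables used only by the 64-bit path exist
only in the 64-bit variant, so a 32-bit build cannot warn about them:

 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>

 #if UINTPTR_MAX == UINT64_MAX		/* stand-in for CONFIG_64BIT */
 static int do_write64(uint64_t *dst, const void *payload)
 {
 	uint64_t src;			/* declared only in this variant */

 	memcpy(&src, payload, sizeof(src));
 	*dst = src;
 	return 0;
 }
 #else
 static int do_write64(uint64_t *dst, const void *payload)
 {
 	(void)dst;			/* stub keeps the caller #ifdef-free */
 	(void)payload;
 	return -1;			/* operation unsupported on 32-bit */
 }
 #endif

 int main(void)
 {
 	uint64_t val = 0, in = 42;

 	printf("ret=%d val=%llu\n", do_write64(&val, &in),
 	       (unsigned long long)val);
 	return 0;
 }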

Fixes: 034e285f ("RDMA/rxe: Make responder support atomic write on RC service")
Link: https://lore.kernel.org/linux-rdma/Y5s+EVE7eLWQqOwv@nvidia.com/
Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent dbc94a0f
drivers/infiniband/sw/rxe/rxe_resp.c
@@ -785,53 +785,61 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 	return ret;
 }
 
-static enum resp_states atomic_write_reply(struct rxe_qp *qp,
-					    struct rxe_pkt_info *pkt)
+#ifdef CONFIG_64BIT
+static enum resp_states do_atomic_write(struct rxe_qp *qp,
+					struct rxe_pkt_info *pkt)
 {
-	u64 src, *dst;
-	struct resp_res *res = qp->resp.res;
 	struct rxe_mr *mr = qp->resp.mr;
 	int payload = payload_size(pkt);
+	u64 src, *dst;
 
-	if (!res) {
-		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
-		qp->resp.res = res;
-	}
+	if (mr->state != RXE_MR_STATE_VALID)
+		return RESPST_ERR_RKEY_VIOLATION;
 
-	if (!res->replay) {
-#ifdef CONFIG_64BIT
-		if (mr->state != RXE_MR_STATE_VALID)
-			return RESPST_ERR_RKEY_VIOLATION;
+	memcpy(&src, payload_addr(pkt), payload);
 
-		memcpy(&src, payload_addr(pkt), payload);
+	dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
+	/* check vaddr is 8 bytes aligned. */
+	if (!dst || (uintptr_t)dst & 7)
+		return RESPST_ERR_MISALIGNED_ATOMIC;
 
-		dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
-		/* check vaddr is 8 bytes aligned. */
-		if (!dst || (uintptr_t)dst & 7)
-			return RESPST_ERR_MISALIGNED_ATOMIC;
+	/* Do atomic write after all prior operations have completed */
+	smp_store_release(dst, src);
 
-		/* Do atomic write after all prior operations have completed */
-		smp_store_release(dst, src);
+	/* decrease resp.resid to zero */
+	qp->resp.resid -= sizeof(payload);
 
-		/* decrease resp.resid to zero */
-		qp->resp.resid -= sizeof(payload);
+	qp->resp.msn++;
 
-		qp->resp.msn++;
+	/* next expected psn, read handles this separately */
+	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+	qp->resp.ack_psn = qp->resp.psn;
 
-		/* next expected psn, read handles this separately */
-		qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
-		qp->resp.ack_psn = qp->resp.psn;
-
-		qp->resp.opcode = pkt->opcode;
-		qp->resp.status = IB_WC_SUCCESS;
-
-		return RESPST_ACKNOWLEDGE;
-#else
-		return RESPST_ERR_UNSUPPORTED_OPCODE;
-#endif /* CONFIG_64BIT */
-	}
+	qp->resp.opcode = pkt->opcode;
+	qp->resp.status = IB_WC_SUCCESS;
+	return RESPST_ACKNOWLEDGE;
+}
+#else
+static enum resp_states do_atomic_write(struct rxe_qp *qp,
+					struct rxe_pkt_info *pkt)
+{
+	return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+#endif /* CONFIG_64BIT */
 
-	return RESPST_ACKNOWLEDGE;
+static enum resp_states atomic_write_reply(struct rxe_qp *qp,
+					    struct rxe_pkt_info *pkt)
+{
+	struct resp_res *res = qp->resp.res;
+
+	if (!res) {
+		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
+		qp->resp.res = res;
+	}
+
+	if (res->replay)
+		return RESPST_ACKNOWLEDGE;
+	return do_atomic_write(qp, pkt);
 }
 
 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,