Commit cb5a967f authored by Benjamin Coddington's avatar Benjamin Coddington Committed by Trond Myklebust

xprtrdma: Fix a maybe-uninitialized compiler warning

This minor fix-up keeps GCC from complaining that "'last' may be used
uninitialized", which breaks some build workflows that have been running
with all warnings treated as errors.
Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 8791545e
...@@ -515,8 +515,8 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) ...@@ -515,8 +515,8 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* a single ib_post_send() call. * a single ib_post_send() call.
*/ */
prev = &first; prev = &first;
while ((mr = rpcrdma_mr_pop(&req->rl_registered))) { mr = rpcrdma_mr_pop(&req->rl_registered);
do {
trace_xprtrdma_mr_localinv(mr); trace_xprtrdma_mr_localinv(mr);
r_xprt->rx_stats.local_inv_needed++; r_xprt->rx_stats.local_inv_needed++;
...@@ -533,7 +533,8 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) ...@@ -533,7 +533,8 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
*prev = last; *prev = last;
prev = &last->next; prev = &last->next;
} } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
mr = container_of(last, struct rpcrdma_mr, mr_invwr); mr = container_of(last, struct rpcrdma_mr, mr_invwr);
/* Strong send queue ordering guarantees that when the /* Strong send queue ordering guarantees that when the
...@@ -617,8 +618,8 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) ...@@ -617,8 +618,8 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* a single ib_post_send() call. * a single ib_post_send() call.
*/ */
prev = &first; prev = &first;
while ((mr = rpcrdma_mr_pop(&req->rl_registered))) { mr = rpcrdma_mr_pop(&req->rl_registered);
do {
trace_xprtrdma_mr_localinv(mr); trace_xprtrdma_mr_localinv(mr);
r_xprt->rx_stats.local_inv_needed++; r_xprt->rx_stats.local_inv_needed++;
...@@ -635,7 +636,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) ...@@ -635,7 +636,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
*prev = last; *prev = last;
prev = &last->next; prev = &last->next;
} } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
/* Strong send queue ordering guarantees that when the /* Strong send queue ordering guarantees that when the
* last WR in the chain completes, all WRs in the chain * last WR in the chain completes, all WRs in the chain
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment