Commit 73930595 authored by Bart Van Assche, committed by Jason Gunthorpe

fs/cifs: Simplify ib_post_(send|recv|srq_recv)() calls

Instead of declaring and passing a dummy 'bad_wr' pointer, pass NULL
as the third argument to ib_post_(send|recv|srq_recv)().
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 23f96d1f
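For context, a minimal sketch of the calling convention this series relies on (not part of the patch): ib_post_send() and its recv/srq counterparts treat the bad_wr argument as optional, so a caller that does not care which work request failed can pass NULL instead of keeping a dummy pointer on the stack. The helper names and the qp/wr setup below are hypothetical, and the const qualification assumes the post-4.18 verbs prototypes; older trees used a plain struct ib_send_wr **.

#include <rdma/ib_verbs.h>

/* Old style: declare a dummy pointer whose value is never inspected. */
static int post_send_old_style(struct ib_qp *qp, struct ib_send_wr *wr)
{
	const struct ib_send_wr *bad_wr;

	return ib_post_send(qp, wr, &bad_wr);
}

/* New style: bad_wr is optional, so pass NULL and drop the dummy variable. */
static int post_send_new_style(struct ib_qp *qp, struct ib_send_wr *wr)
{
	return ib_post_send(qp, wr, NULL);
}

Passing NULL removes a stack variable that is only meaningful when posting fails, which is what motivates dropping it throughout fs/cifs below.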
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -801,7 +801,7 @@ static int smbd_ia_open(
  */
 static int smbd_post_send_negotiate_req(struct smbd_connection *info)
 {
-	struct ib_send_wr send_wr, *send_wr_fail;
+	struct ib_send_wr send_wr;
 	int rc = -ENOMEM;
 	struct smbd_request *request;
 	struct smbd_negotiate_req *packet;
@@ -853,7 +853,7 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
 	request->has_payload = false;
 	atomic_inc(&info->send_pending);
 
-	rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+	rc = ib_post_send(info->id->qp, &send_wr, NULL);
 	if (!rc)
 		return 0;
 
@@ -1023,7 +1023,7 @@ static void smbd_destroy_header(struct smbd_connection *info,
 static int smbd_post_send(struct smbd_connection *info,
 		struct smbd_request *request, bool has_payload)
 {
-	struct ib_send_wr send_wr, *send_wr_fail;
+	struct ib_send_wr send_wr;
 	int rc, i;
 
 	for (i = 0; i < request->num_sge; i++) {
@@ -1054,7 +1054,7 @@ static int smbd_post_send(struct smbd_connection *info,
 		atomic_inc(&info->send_pending);
 	}
 
-	rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+	rc = ib_post_send(info->id->qp, &send_wr, NULL);
 	if (rc) {
 		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
 		if (has_payload) {
@@ -1183,7 +1183,7 @@ static int smbd_post_send_data(
 static int smbd_post_recv(
 		struct smbd_connection *info, struct smbd_response *response)
 {
-	struct ib_recv_wr recv_wr, *recv_wr_fail = NULL;
+	struct ib_recv_wr recv_wr;
 	int rc = -EIO;
 
 	response->sge.addr = ib_dma_map_single(
@@ -1202,7 +1202,7 @@ static int smbd_post_recv(
 	recv_wr.sg_list = &response->sge;
 	recv_wr.num_sge = 1;
 
-	rc = ib_post_recv(info->id->qp, &recv_wr, &recv_wr_fail);
+	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
 	if (rc) {
 		ib_dma_unmap_single(info->id->device, response->sge.addr,
 			response->sge.length, DMA_FROM_DEVICE);
@@ -2488,7 +2488,6 @@ struct smbd_mr *smbd_register_mr(
 	int rc, i;
 	enum dma_data_direction dir;
 	struct ib_reg_wr *reg_wr;
-	struct ib_send_wr *bad_wr;
 
 	if (num_pages > info->max_frmr_depth) {
 		log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
@@ -2562,7 +2561,7 @@ struct smbd_mr *smbd_register_mr(
 	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
 	 * on the next ib_post_send when we actaully send I/O to remote peer
 	 */
-	rc = ib_post_send(info->id->qp, &reg_wr->wr, &bad_wr);
+	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
 	if (!rc)
 		return smbdirect_mr;
 
@@ -2607,7 +2606,7 @@ static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
  */
 int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
 {
-	struct ib_send_wr *wr, *bad_wr;
+	struct ib_send_wr *wr;
 	struct smbd_connection *info = smbdirect_mr->conn;
 	int rc = 0;
 
@@ -2622,7 +2621,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
 		wr->send_flags = IB_SEND_SIGNALED;
 
 		init_completion(&smbdirect_mr->invalidate_done);
-		rc = ib_post_send(info->id->qp, wr, &bad_wr);
+		rc = ib_post_send(info->id->qp, wr, NULL);
 		if (rc) {
 			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
 			smbd_disconnect_rdma_connection(info);