Commit 1fec77bf authored by Bart Van Assche, committed by Jason Gunthorpe

RDMA/core: Simplify ib_post_(send|recv|srq_recv)() calls

Instead of declaring and passing a dummy 'bad_wr' pointer, pass NULL
as third argument to ib_post_(send|recv|srq_recv)().
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent bb039a87
...@@ -1181,7 +1181,6 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) ...@@ -1181,7 +1181,6 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{ {
struct ib_mad_qp_info *qp_info; struct ib_mad_qp_info *qp_info;
struct list_head *list; struct list_head *list;
struct ib_send_wr *bad_send_wr;
struct ib_mad_agent *mad_agent; struct ib_mad_agent *mad_agent;
struct ib_sge *sge; struct ib_sge *sge;
unsigned long flags; unsigned long flags;
...@@ -1219,7 +1218,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) ...@@ -1219,7 +1218,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
spin_lock_irqsave(&qp_info->send_queue.lock, flags); spin_lock_irqsave(&qp_info->send_queue.lock, flags);
if (qp_info->send_queue.count < qp_info->send_queue.max_active) { if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
&bad_send_wr); NULL);
list = &qp_info->send_queue.list; list = &qp_info->send_queue.list;
} else { } else {
ret = 0; ret = 0;
...@@ -2476,7 +2475,6 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) ...@@ -2476,7 +2475,6 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr; struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
struct ib_mad_qp_info *qp_info; struct ib_mad_qp_info *qp_info;
struct ib_mad_queue *send_queue; struct ib_mad_queue *send_queue;
struct ib_send_wr *bad_send_wr;
struct ib_mad_send_wc mad_send_wc; struct ib_mad_send_wc mad_send_wc;
unsigned long flags; unsigned long flags;
int ret; int ret;
...@@ -2526,7 +2524,7 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc) ...@@ -2526,7 +2524,7 @@ static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
if (queued_send_wr) { if (queued_send_wr) {
ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
&bad_send_wr); NULL);
if (ret) { if (ret) {
dev_err(&port_priv->device->dev, dev_err(&port_priv->device->dev,
"ib_post_send failed: %d\n", ret); "ib_post_send failed: %d\n", ret);
...@@ -2571,11 +2569,9 @@ static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, ...@@ -2571,11 +2569,9 @@ static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
if (wc->status == IB_WC_WR_FLUSH_ERR) { if (wc->status == IB_WC_WR_FLUSH_ERR) {
if (mad_send_wr->retry) { if (mad_send_wr->retry) {
/* Repost send */ /* Repost send */
struct ib_send_wr *bad_send_wr;
mad_send_wr->retry = 0; mad_send_wr->retry = 0;
ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
&bad_send_wr); NULL);
if (!ret) if (!ret)
return false; return false;
} }
...@@ -2891,7 +2887,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, ...@@ -2891,7 +2887,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
int post, ret; int post, ret;
struct ib_mad_private *mad_priv; struct ib_mad_private *mad_priv;
struct ib_sge sg_list; struct ib_sge sg_list;
struct ib_recv_wr recv_wr, *bad_recv_wr; struct ib_recv_wr recv_wr;
struct ib_mad_queue *recv_queue = &qp_info->recv_queue; struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
/* Initialize common scatter list fields */ /* Initialize common scatter list fields */
...@@ -2935,7 +2931,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, ...@@ -2935,7 +2931,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
post = (++recv_queue->count < recv_queue->max_active); post = (++recv_queue->count < recv_queue->max_active);
list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
spin_unlock_irqrestore(&recv_queue->lock, flags); spin_unlock_irqrestore(&recv_queue->lock, flags);
ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
if (ret) { if (ret) {
spin_lock_irqsave(&recv_queue->lock, flags); spin_lock_irqsave(&recv_queue->lock, flags);
list_del(&mad_priv->header.mad_list.list); list_del(&mad_priv->header.mad_list.list);
......
...@@ -564,10 +564,10 @@ EXPORT_SYMBOL(rdma_rw_ctx_wrs); ...@@ -564,10 +564,10 @@ EXPORT_SYMBOL(rdma_rw_ctx_wrs);
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num, int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
struct ib_cqe *cqe, struct ib_send_wr *chain_wr) struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{ {
struct ib_send_wr *first_wr, *bad_wr; struct ib_send_wr *first_wr;
first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr); first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
return ib_post_send(qp, first_wr, &bad_wr); return ib_post_send(qp, first_wr, NULL);
} }
EXPORT_SYMBOL(rdma_rw_ctx_post); EXPORT_SYMBOL(rdma_rw_ctx_post);
......
...@@ -2473,7 +2473,6 @@ static void __ib_drain_sq(struct ib_qp *qp) ...@@ -2473,7 +2473,6 @@ static void __ib_drain_sq(struct ib_qp *qp)
struct ib_cq *cq = qp->send_cq; struct ib_cq *cq = qp->send_cq;
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
struct ib_drain_cqe sdrain; struct ib_drain_cqe sdrain;
struct ib_send_wr *bad_swr;
struct ib_rdma_wr swr = { struct ib_rdma_wr swr = {
.wr = { .wr = {
.next = NULL, .next = NULL,
...@@ -2492,7 +2491,7 @@ static void __ib_drain_sq(struct ib_qp *qp) ...@@ -2492,7 +2491,7 @@ static void __ib_drain_sq(struct ib_qp *qp)
sdrain.cqe.done = ib_drain_qp_done; sdrain.cqe.done = ib_drain_qp_done;
init_completion(&sdrain.done); init_completion(&sdrain.done);
ret = ib_post_send(qp, &swr.wr, &bad_swr); ret = ib_post_send(qp, &swr.wr, NULL);
if (ret) { if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
return; return;
...@@ -2513,7 +2512,7 @@ static void __ib_drain_rq(struct ib_qp *qp) ...@@ -2513,7 +2512,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
struct ib_cq *cq = qp->recv_cq; struct ib_cq *cq = qp->recv_cq;
struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
struct ib_drain_cqe rdrain; struct ib_drain_cqe rdrain;
struct ib_recv_wr rwr = {}, *bad_rwr; struct ib_recv_wr rwr = {};
int ret; int ret;
ret = ib_modify_qp(qp, &attr, IB_QP_STATE); ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
...@@ -2526,7 +2525,7 @@ static void __ib_drain_rq(struct ib_qp *qp) ...@@ -2526,7 +2525,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
rdrain.cqe.done = ib_drain_qp_done; rdrain.cqe.done = ib_drain_qp_done;
init_completion(&rdrain.done); init_completion(&rdrain.done);
ret = ib_post_recv(qp, &rwr, &bad_rwr); ret = ib_post_recv(qp, &rwr, NULL);
if (ret) { if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return; return;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.