Commit f455a1bc authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Make tasks schedule each other

Replace rxe_run_task() by rxe_sched_task() when tasks call each other.
These are not performance critical and mainly involve error paths but they
run the risk of causing deadlocks.

Link: https://lore.kernel.org/r/20230304174533.11296-8-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 960ebe97
...@@ -322,7 +322,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, ...@@ -322,7 +322,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn; qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) { if (qp->req.wait_psn) {
qp->req.wait_psn = 0; qp->req.wait_psn = 0;
rxe_run_task(&qp->req.task); rxe_sched_task(&qp->req.task);
} }
} }
return COMPST_ERROR_RETRY; return COMPST_ERROR_RETRY;
...@@ -473,7 +473,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) ...@@ -473,7 +473,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/ */
if (qp->req.wait_fence) { if (qp->req.wait_fence) {
qp->req.wait_fence = 0; qp->req.wait_fence = 0;
rxe_run_task(&qp->req.task); rxe_sched_task(&qp->req.task);
} }
} }
...@@ -487,7 +487,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, ...@@ -487,7 +487,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (qp->req.need_rd_atomic) { if (qp->req.need_rd_atomic) {
qp->comp.timeout_retry = 0; qp->comp.timeout_retry = 0;
qp->req.need_rd_atomic = 0; qp->req.need_rd_atomic = 0;
rxe_run_task(&qp->req.task); rxe_sched_task(&qp->req.task);
} }
} }
...@@ -767,7 +767,7 @@ int rxe_completer(struct rxe_qp *qp) ...@@ -767,7 +767,7 @@ int rxe_completer(struct rxe_qp *qp)
RXE_CNT_COMP_RETRY); RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1; qp->req.need_retry = 1;
qp->comp.started_retry = 1; qp->comp.started_retry = 1;
rxe_run_task(&qp->req.task); rxe_sched_task(&qp->req.task);
} }
goto done; goto done;
......
...@@ -753,7 +753,7 @@ int rxe_requester(struct rxe_qp *qp) ...@@ -753,7 +753,7 @@ int rxe_requester(struct rxe_qp *qp)
qp->req.wqe_index); qp->req.wqe_index);
wqe->state = wqe_state_done; wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS; wqe->status = IB_WC_SUCCESS;
rxe_run_task(&qp->comp.task); rxe_sched_task(&qp->comp.task);
goto done; goto done;
} }
payload = mtu; payload = mtu;
...@@ -837,7 +837,7 @@ int rxe_requester(struct rxe_qp *qp) ...@@ -837,7 +837,7 @@ int rxe_requester(struct rxe_qp *qp)
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index); qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
wqe->state = wqe_state_error; wqe->state = wqe_state_error;
qp->req.state = QP_STATE_ERROR; qp->req.state = QP_STATE_ERROR;
rxe_run_task(&qp->comp.task); rxe_sched_task(&qp->comp.task);
exit: exit:
ret = -EAGAIN; ret = -EAGAIN;
out: out:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment