Commit eab30c4d authored by Pavel Begunkov, committed by Jens Axboe

io_uring: deduplicate failing task_work_add

When io_req_task_work_add() fails, the request will be cancelled by
enqueueing it via the task_works of io-wq. Extract a helper function
for that.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 02b23a9a
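For context before the diff: the patch collapses four open-coded copies of the same fallback (install the cancel callback, look up the io-wq manager task, queue the task_work, wake the task) into one helper. A minimal userspace sketch of that refactor follows; it is illustration only, not the kernel API, and every name in it (struct request, fallback_queue(), cancel_cb()) is invented:

/* Userspace sketch only: models the "extract the duplicated fallback
 * into one helper taking the callback" refactor from this patch.
 * All names are stand-ins, not the kernel API. */
#include <stdio.h>

struct request {
        void (*cb)(struct request *);   /* plays the role of req->task_work */
};

/* plays the role of io_req_task_work_add_fallback(): the one place
 * that installs the callback and hands the request to the fallback
 * worker */
static void fallback_queue(struct request *req, void (*cb)(struct request *))
{
        req->cb = cb;   /* init_task_work() */
        req->cb(req);   /* pretend the woken worker picked it up and ran it */
}

static void cancel_cb(struct request *req)
{
        printf("request %p cancelled via fallback\n", (void *)req);
}

int main(void)
{
        struct request req;
        int ret = -1;   /* pretend io_req_task_work_add() failed */

        /* each former open-coded call site shrinks to this one line */
        if (ret)
                fallback_queue(&req, cancel_cb);
        return 0;
}

The payoff mirrors the patch below: call sites become a single call, and the queue-and-wake details live in exactly one place.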
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2171,6 +2171,16 @@ static int io_req_task_work_add(struct io_kiocb *req)
        return ret;
 }

+static void io_req_task_work_add_fallback(struct io_kiocb *req,
+                                          void (*cb)(struct callback_head *))
+{
+       struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
+
+       init_task_work(&req->task_work, cb);
+       task_work_add(tsk, &req->task_work, TWA_NONE);
+       wake_up_process(tsk);
+}
+
 static void __io_req_task_cancel(struct io_kiocb *req, int error)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -2225,14 +2235,8 @@ static void io_req_task_queue(struct io_kiocb *req)
        percpu_ref_get(&req->ctx->refs);

        ret = io_req_task_work_add(req);
-       if (unlikely(ret)) {
-               struct task_struct *tsk;
-
-               init_task_work(&req->task_work, io_req_task_cancel);
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
-       }
+       if (unlikely(ret))
+               io_req_task_work_add_fallback(req, io_req_task_cancel);
 }

 static inline void io_queue_next(struct io_kiocb *req)
@@ -2350,13 +2354,8 @@ static void io_free_req_deferred(struct io_kiocb *req)
        init_task_work(&req->task_work, io_put_req_deferred_cb);
        ret = io_req_task_work_add(req);
-       if (unlikely(ret)) {
-               struct task_struct *tsk;
-
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
-       }
+       if (unlikely(ret))
+               io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
 }

 static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
@@ -3425,15 +3424,8 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
        /* submit ref gets dropped, acquire a new one */
        refcount_inc(&req->refs);
        ret = io_req_task_work_add(req);
-       if (unlikely(ret)) {
-               struct task_struct *tsk;
-
-               /* queue just for cancelation */
-               init_task_work(&req->task_work, io_req_task_cancel);
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
-       }
+       if (unlikely(ret))
+               io_req_task_work_add_fallback(req, io_req_task_cancel);
        return 1;
 }
@@ -5153,12 +5145,8 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
         */
        ret = io_req_task_work_add(req);
        if (unlikely(ret)) {
-               struct task_struct *tsk;
-
                WRITE_ONCE(poll->canceled, true);
-               tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, TWA_NONE);
-               wake_up_process(tsk);
+               io_req_task_work_add_fallback(req, func);
        }
        return 1;
 }