Commit 80c4cbdb authored by Pavel Begunkov, committed by Jens Axboe

io_uring: do post-completion chore on t-out cancel

Don't forget about io_commit_cqring() + io_cqring_ev_posted() after
exit/exec cancellation of timeouts. Both functions are declared only after
io_kill_timeouts(), so to avoid a pile of forward declarations, move it
down.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/72ace588772c0f14834a6a4185d56c445a366fb4.1616696997.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1ee4160c
@@ -1262,26 +1262,6 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
 	}
 }
 
-/*
- * Returns true if we found and killed one or more timeouts
- */
-static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
-			     struct files_struct *files)
-{
-	struct io_kiocb *req, *tmp;
-	int canceled = 0;
-
-	spin_lock_irq(&ctx->completion_lock);
-	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
-		if (io_match_task(req, tsk, files)) {
-			io_kill_timeout(req, -ECANCELED);
-			canceled++;
-		}
-	}
-	spin_unlock_irq(&ctx->completion_lock);
-	return canceled != 0;
-}
-
 static void __io_queue_deferred(struct io_ring_ctx *ctx)
 {
 	do {
@@ -8611,6 +8591,28 @@ static void io_ring_exit_work(struct work_struct *work)
 	io_ring_ctx_free(ctx);
 }
 
+/* Returns true if we found and killed one or more timeouts */
+static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+			     struct files_struct *files)
+{
+	struct io_kiocb *req, *tmp;
+	int canceled = 0;
+
+	spin_lock_irq(&ctx->completion_lock);
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+		if (io_match_task(req, tsk, files)) {
+			io_kill_timeout(req, -ECANCELED);
+			canceled++;
+		}
+	}
+	io_commit_cqring(ctx);
+	spin_unlock_irq(&ctx->completion_lock);
+
+	if (canceled != 0)
+		io_cqring_ev_posted(ctx);
+	return canceled != 0;
+}
+
 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
 	unsigned long index;
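For readers less familiar with the CQ posting protocol this patch relies on: pending CQEs are flushed to the ring while completion_lock is still held (io_commit_cqring()), the lock is then dropped, and waiters are woken afterwards (io_cqring_ev_posted()) only if something was actually cancelled. A rough userspace analogue of that ordering, using pthreads and invented names (fake_ring, ring_cancel_all) rather than the kernel primitives, might look like this sketch:

/*
 * Userspace analogue (not kernel code) of the ordering the moved
 * io_kill_timeouts() follows: publish results under the lock, drop the
 * lock, then notify waiters only when something was actually posted.
 */
#include <pthread.h>
#include <stdbool.h>

struct fake_ring {
	pthread_mutex_t lock;	/* stands in for ctx->completion_lock */
	pthread_cond_t waiters;	/* stands in for the CQ waitqueue */
	int pending;		/* requests not yet completed */
	int visible;		/* completions published to consumers */
};

static bool ring_cancel_all(struct fake_ring *r)
{
	int canceled = 0;

	pthread_mutex_lock(&r->lock);
	while (r->pending) {
		r->pending--;	/* "kill" one queued request */
		canceled++;
	}
	r->visible += canceled;	/* publish under the lock, in the spirit
				 * of io_commit_cqring() */
	pthread_mutex_unlock(&r->lock);

	if (canceled)		/* wake waiters outside the lock, in the
				 * spirit of io_cqring_ev_posted() */
		pthread_cond_broadcast(&r->waiters);

	return canceled != 0;
}

int main(void)
{
	struct fake_ring r = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.waiters = PTHREAD_COND_INITIALIZER,
		.pending = 3,
	};

	return ring_cancel_all(&r) ? 0 : 1;
}

The point of the sketch is only the ordering: commit while locked, notify after unlocking, and skip the wakeup entirely when nothing was posted.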