Commit 0e9ddb39 authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

io_uring: clean up cancel SQPOLL reqs across exec

For SQPOLL rings tctx_inflight() always returns zero, so it might skip
doing full cancellation. This is functionally fine, because we jam all
SQPOLL submissions in any case and do go through the files-cancel path
for them, but it is not clean.

Do the intended full cancellation, by mimicking __io_uring_task_cancel()
waiting but impersonating SQPOLL task.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 257e84a5
......@@ -9083,29 +9083,39 @@ void __io_uring_files_cancel(struct files_struct *files)
/*
 * Sum of in-flight requests for this task's io_uring context.
 *
 * Note: the stripped diff left the old function's removed locals
 * ("unsigned long index; struct file *file;") dead in this body; the
 * post-commit function is simply the per-cpu counter sum, since SQPOLL
 * inflight accounting no longer requires walking the ring xarray.
 */
static s64 tctx_inflight(struct io_uring_task *tctx)
{
	return percpu_counter_sum(&tctx->inflight);
}
/*
 * Fully cancel inflight requests on a SQPOLL ring by impersonating the
 * SQPOLL task: mirrors the wait loop of __io_uring_task_cancel(), but
 * operates on the sq_data thread's io_uring task context.
 *
 * Note: the stripped diff had interleaved removed lines here (including
 * a "return inflight;" inside this void function, which cannot compile);
 * this is the coherent post-commit body.
 */
static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	s64 inflight;
	DEFINE_WAIT(wait);

	/* Nothing to do for non-SQPOLL rings. */
	if (!ctx->sq_data)
		return;
	tctx = ctx->sq_data->thread->io_uring;
	io_disable_sqo_submit(ctx);

	atomic_inc(&tctx->in_idle);
	do {
		/* read completions before cancelations */
		inflight = tctx_inflight(tctx);
		if (!inflight)
			break;
		io_uring_cancel_task_requests(ctx, NULL);

		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
		/*
		 * If we've seen completions, retry without waiting. This
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
		if (inflight == tctx_inflight(tctx))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);
	atomic_dec(&tctx->in_idle);
}
/*
......@@ -9122,8 +9132,13 @@ void __io_uring_task_cancel(void)
atomic_inc(&tctx->in_idle);
/* trigger io_disable_sqo_submit() */
if (tctx->sqpoll)
__io_uring_files_cancel(NULL);
if (tctx->sqpoll) {
struct file *file;
unsigned long index;
xa_for_each(&tctx->xa, index, file)
io_uring_cancel_sqpoll(file->private_data);
}
do {
/* read completions before cancelations */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment