Commit 0298ef96 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: clean R_DISABLED startup mess

There are enough problems with IORING_SETUP_R_DISABLED, including the
burden of checking and kicking off the SQO task all over the codebase --
for exit/cancel/etc.

Rework it: always start the SQO thread, but don't submit SQEs until the
flag is cleared. That's much simpler.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f458dd84
@@ -6606,7 +6606,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 		if (!list_empty(&ctx->iopoll_list))
 			io_do_iopoll(ctx, &nr_events, 0);
 
-		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
+		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
+		    !(ctx->flags & IORING_SETUP_R_DISABLED))
 			ret = io_submit_sqes(ctx, to_submit);
 		mutex_unlock(&ctx->uring_lock);
 	}
@@ -7861,6 +7862,7 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 		wake_up_new_task(tsk);
 		if (ret)
 			goto err;
+		complete(&sqd->startup);
 	} else if (p->flags & IORING_SETUP_SQ_AFF) {
 		/* Can't have SQ_AFF without SQPOLL */
 		ret = -EINVAL;
@@ -7873,15 +7875,6 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 	return ret;
 }
 
-static void io_sq_offload_start(struct io_ring_ctx *ctx)
-{
-	struct io_sq_data *sqd = ctx->sq_data;
-
-	ctx->flags &= ~IORING_SETUP_R_DISABLED;
-	if (ctx->flags & IORING_SETUP_SQPOLL)
-		complete(&sqd->startup);
-}
-
 static inline void __io_unaccount_mem(struct user_struct *user,
 				      unsigned long nr_pages)
 {
@@ -8742,11 +8735,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	struct task_struct *task = current;
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
-		/* never started, nothing to cancel */
-		if (ctx->flags & IORING_SETUP_R_DISABLED) {
-			io_sq_offload_start(ctx);
-			return;
-		}
 		io_sq_thread_park(ctx->sq_data);
 		task = ctx->sq_data->thread;
 		if (task)
@@ -9449,9 +9437,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	if (ret)
 		goto err;
 
-	if (!(p->flags & IORING_SETUP_R_DISABLED))
-		io_sq_offload_start(ctx);
-
 	memset(&p->sq_off, 0, sizeof(p->sq_off));
 	p->sq_off.head = offsetof(struct io_rings, sq.head);
 	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
@@ -9668,7 +9653,9 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
 	if (ctx->restrictions.registered)
 		ctx->restricted = 1;
 
-	io_sq_offload_start(ctx);
+	ctx->flags &= ~IORING_SETUP_R_DISABLED;
+	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
+		wake_up(&ctx->sq_data->wait);
 
 	return 0;
 }
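
For readers following the commit message, here is a minimal user-space sketch of the reworked flow. It is an analogy, not kernel code: a worker thread (standing in for the SQPOLL task) is started unconditionally, parks itself while a "disabled" flag is set, and an enable step (loosely mirroring io_register_enable_rings() in the diff above) clears the flag and wakes it. Apart from the io_uring identifiers quoted in comments, every name here is made up for illustration.

/*
 * Illustrative user-space analogy only, NOT kernel code.  The worker is
 * created right away but skips "submission" while a disabled flag is set;
 * enable_rings() clears the flag and wakes it, roughly how the patch has
 * io_register_enable_rings() clear IORING_SETUP_R_DISABLED and wake the
 * sq_data wait queue.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;
static bool r_disabled = true;	/* analogue of IORING_SETUP_R_DISABLED */
static bool stop;

static void *sq_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		if (r_disabled) {
			/* rings not enabled yet: sleep instead of submitting */
			pthread_cond_wait(&wait_cv, &lock);
			continue;
		}
		printf("worker: submitting\n");	/* io_submit_sqes() stand-in */
		pthread_cond_wait(&wait_cv, &lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void enable_rings(void)
{
	pthread_mutex_lock(&lock);
	r_disabled = false;		/* clear the "disabled" flag */
	pthread_cond_signal(&wait_cv);	/* wake the sleeping worker */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sq_worker, NULL);	/* always started */
	sleep(1);
	enable_rings();					/* enable later */
	sleep(1);

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&wait_cv);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}

Build with cc -pthread. The point is only the shape of the state machine: one unconditional thread start plus a single flag check on the submission path, instead of deferring thread startup until the rings are enabled.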