Commit 7c25c0d1 authored by Jens Axboe

io_uring: remove the need for relying on an io-wq fallback worker

We hit this case when the task is exiting, and we need somewhere to
do background cleanup of requests. Instead of relying on the io-wq
task manager to do this work for us, just stuff it somewhere where
we can safely run it ourselves directly.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 27131549
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/rculist_nulls.h> #include <linux/rculist_nulls.h>
#include <linux/fs_struct.h> #include <linux/fs_struct.h>
#include <linux/task_work.h>
#include <linux/blk-cgroup.h> #include <linux/blk-cgroup.h>
#include <linux/audit.h> #include <linux/audit.h>
#include <linux/cpu.h> #include <linux/cpu.h>
...@@ -775,9 +774,6 @@ static int io_wq_manager(void *data) ...@@ -775,9 +774,6 @@ static int io_wq_manager(void *data)
complete(&wq->done); complete(&wq->done);
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
if (current->task_works)
task_work_run();
for_each_node(node) { for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node]; struct io_wqe *wqe = wq->wqes[node];
bool fork_worker[2] = { false, false }; bool fork_worker[2] = { false, false };
...@@ -800,9 +796,6 @@ static int io_wq_manager(void *data) ...@@ -800,9 +796,6 @@ static int io_wq_manager(void *data)
schedule_timeout(HZ); schedule_timeout(HZ);
} }
if (current->task_works)
task_work_run();
out: out:
if (refcount_dec_and_test(&wq->refs)) { if (refcount_dec_and_test(&wq->refs)) {
complete(&wq->done); complete(&wq->done);
...@@ -1160,11 +1153,6 @@ void io_wq_destroy(struct io_wq *wq) ...@@ -1160,11 +1153,6 @@ void io_wq_destroy(struct io_wq *wq)
__io_wq_destroy(wq); __io_wq_destroy(wq);
} }
/* Return the manager kthread that owns this io-wq instance. */
struct task_struct *io_wq_get_task(struct io_wq *wq)
{
	struct task_struct *manager = wq->manager;

	return manager;
}
static bool io_wq_worker_affinity(struct io_worker *worker, void *data) static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{ {
struct task_struct *task = worker->task; struct task_struct *task = worker->task;
......
...@@ -124,8 +124,6 @@ typedef bool (work_cancel_fn)(struct io_wq_work *, void *); ...@@ -124,8 +124,6 @@ typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel, enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
void *data, bool cancel_all); void *data, bool cancel_all);
struct task_struct *io_wq_get_task(struct io_wq *wq);
#if defined(CONFIG_IO_WQ) #if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *); extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *); extern void io_wq_worker_running(struct task_struct *);
......
...@@ -456,6 +456,9 @@ struct io_ring_ctx { ...@@ -456,6 +456,9 @@ struct io_ring_ctx {
struct io_restriction restrictions; struct io_restriction restrictions;
/* exit task_work */
struct callback_head *exit_task_work;
/* Keep this last, we don't need it for the fast path */ /* Keep this last, we don't need it for the fast path */
struct work_struct exit_work; struct work_struct exit_work;
}; };
...@@ -2328,11 +2331,14 @@ static int io_req_task_work_add(struct io_kiocb *req) ...@@ -2328,11 +2331,14 @@ static int io_req_task_work_add(struct io_kiocb *req)
/*
 * Fallback for queueing task_work when no task is available to run it
 * (e.g. the task is exiting): push the request's callback_head onto the
 * ctx's lock-free exit_task_work list. Entries pushed here are reaped
 * and executed later by io_run_ctx_fallback().
 */
static void io_req_task_work_add_fallback(struct io_kiocb *req,
					  task_work_func_t cb)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct callback_head *old;

	init_task_work(&req->task_work, cb);

	/* Lock-free LIFO push: retry until the list head is unchanged. */
	do {
		old = READ_ONCE(ctx->exit_task_work);
		req->task_work.next = old;
	} while (cmpxchg(&ctx->exit_task_work, old, &req->task_work) != old);
}
static void __io_req_task_cancel(struct io_kiocb *req, int error) static void __io_req_task_cancel(struct io_kiocb *req, int error)
...@@ -8835,6 +8841,28 @@ static int io_remove_personalities(int id, void *p, void *data) ...@@ -8835,6 +8841,28 @@ static int io_remove_personalities(int id, void *p, void *data)
return 0; return 0;
} }
/*
 * Run any task_work callbacks queued on the ctx's exit_task_work list by
 * io_req_task_work_add_fallback(). Loops until the list stays empty,
 * since a callback may queue further fallback work while we run.
 */
static void io_run_ctx_fallback(struct io_ring_ctx *ctx)
{
	struct callback_head *work, *next;

	do {
		/*
		 * Atomically detach the whole pending list, replacing it
		 * with NULL. (The original carried a 'head' variable that
		 * was re-set to NULL every iteration and never held any
		 * other value — swap NULL in directly instead.)
		 */
		do {
			work = READ_ONCE(ctx->exit_task_work);
		} while (cmpxchg(&ctx->exit_task_work, work, NULL) != work);

		if (!work)
			break;

		/*
		 * Run the detached entries in list order. 'next' is saved
		 * before the call because work->func() may free the entry.
		 */
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	} while (1);
}
static void io_ring_exit_work(struct work_struct *work) static void io_ring_exit_work(struct work_struct *work)
{ {
struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
...@@ -8848,6 +8876,7 @@ static void io_ring_exit_work(struct work_struct *work) ...@@ -8848,6 +8876,7 @@ static void io_ring_exit_work(struct work_struct *work)
*/ */
do { do {
io_uring_try_cancel_requests(ctx, NULL, NULL); io_uring_try_cancel_requests(ctx, NULL, NULL);
io_run_ctx_fallback(ctx);
} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)); } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
io_ring_ctx_free(ctx); io_ring_ctx_free(ctx);
} }
...@@ -9243,6 +9272,8 @@ static int io_uring_flush(struct file *file, void *data) ...@@ -9243,6 +9272,8 @@ static int io_uring_flush(struct file *file, void *data)
io_req_caches_free(ctx, current); io_req_caches_free(ctx, current);
} }
io_run_ctx_fallback(ctx);
if (!tctx) if (!tctx)
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment