Commit aa96bf8a authored by Jens Axboe

io_uring: use io-wq manager as backup task if task is exiting

If the original task is exiting (or has already exited), then the task work
will not get queued properly. Allow the io-wq manager task to queue this
work for execution, and ensure that the io-wq manager notices and runs
this work if woken up (or exiting).
Reported-by: Dan Melnic <dmm@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3537b6a7
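For context before the diff: the pattern this change introduces is to try queuing a callback on the originating task with task_work_add(), which fails once that task has begun exiting, and on failure to punt the callback to a long-lived backup kthread. Below is a minimal sketch of that pattern; queue_work_with_backup() and its backup parameter are hypothetical names for illustration, while the patch itself open-codes this logic in __io_async_wake() further down.

/*
 * Sketch only: a hypothetical helper showing the fallback pattern this
 * commit introduces. Not kernel API; the real change open-codes this
 * in __io_async_wake() below.
 */
#include <linux/sched.h>
#include <linux/task_work.h>

static void queue_work_with_backup(struct task_struct *tsk,
				   struct task_struct *backup,
				   struct callback_head *work)
{
	/* Preferred target: the task that originated the request. */
	if (unlikely(task_work_add(tsk, work, true))) {
		/*
		 * task_work_add() fails once the target task is exiting;
		 * punt to a known-alive kthread (the io-wq manager in
		 * this commit) rather than losing the work.
		 */
		tsk = backup;
		task_work_add(tsk, work, true);
	}
	/* Kick whichever task ended up with the work so it runs promptly. */
	wake_up_process(tsk);
}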
fs/io-wq.c
@@ -17,6 +17,7 @@
 #include <linux/kthread.h>
 #include <linux/rculist_nulls.h>
 #include <linux/fs_struct.h>
+#include <linux/task_work.h>
 
 #include "io-wq.h"
@@ -716,6 +717,9 @@ static int io_wq_manager(void *data)
 	complete(&wq->done);
 
 	while (!kthread_should_stop()) {
+		if (current->task_works)
+			task_work_run();
+
 		for_each_node(node) {
 			struct io_wqe *wqe = wq->wqes[node];
 			bool fork_worker[2] = { false, false };
@@ -738,6 +742,9 @@ static int io_wq_manager(void *data)
 		schedule_timeout(HZ);
 	}
 
+	if (current->task_works)
+		task_work_run();
+
 	return 0;
 err:
 	set_bit(IO_WQ_BIT_ERROR, &wq->state);
@@ -1124,3 +1131,8 @@ void io_wq_destroy(struct io_wq *wq)
 	if (refcount_dec_and_test(&wq->use_refs))
 		__io_wq_destroy(wq);
 }
+
+struct task_struct *io_wq_get_task(struct io_wq *wq)
+{
+	return wq->manager;
+}
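The other half of the pattern is the consumer: a kthread used as a task_work target has to drain current->task_works in its main loop and once more on the way out, or punted callbacks could be stranded. That is what the io_wq_manager() hunks above add. The standalone sketch below assumes a plain kthread with no other duties; backup_thread_fn() is an illustrative name.

/*
 * Sketch only: a kthread that doubles as a task_work target, mirroring
 * the io_wq_manager() changes above. backup_thread_fn() is illustrative.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/task_work.h>

static int backup_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* Run callbacks queued to this thread via task_work_add(). */
		if (current->task_works)
			task_work_run();

		/* ... the thread's normal periodic duties would go here ... */

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	/* Drain once more so work queued during the final sleep is not lost. */
	if (current->task_works)
		task_work_run();
	return 0;
}

The manager is a natural backup target because it lives for the lifetime of the io_wq, so a request punted to it always has a live task context to run in.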
fs/io-wq.h
@@ -136,6 +136,8 @@ typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data);
 
+struct task_struct *io_wq_get_task(struct io_wq *wq);
+
 #if defined(CONFIG_IO_WQ)
 extern void io_wq_worker_sleeping(struct task_struct *);
 extern void io_wq_worker_running(struct task_struct *);
fs/io_uring.c
@@ -4120,6 +4120,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 			   __poll_t mask, task_work_func_t func)
 {
 	struct task_struct *tsk;
+	int ret;
 
 	/* for instances that support it check for an event match first: */
 	if (mask && !(mask & poll->events))
@@ -4133,11 +4134,15 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	req->result = mask;
 	init_task_work(&req->task_work, func);
 	/*
-	 * If this fails, then the task is exiting. If that is the case, then
-	 * the exit check will ultimately cancel these work items. Hence we
-	 * don't need to check here and handle it specifically.
+	 * If this fails, then the task is exiting. Punt to one of the io-wq
+	 * threads to ensure the work gets run, we can't always rely on exit
+	 * cancelation taking care of this.
 	 */
-	task_work_add(tsk, &req->task_work, true);
+	ret = task_work_add(tsk, &req->task_work, true);
+	if (unlikely(ret)) {
+		tsk = io_wq_get_task(req->ctx->io_wq);
+		task_work_add(tsk, &req->task_work, true);
+	}
 	wake_up_process(tsk);
 	return 1;
 }