Commit 3bfe6106 authored by Jens Axboe

io-wq: fork worker threads from original task

Instead of using regular kthread kernel threads, create kernel threads
that are like a real thread that the task would create. This ensures that
we get all the context that we need, without having to carry that state
around. This greatly reduces the code complexity, and the risk of missing
state for a given request type.

With the move away from kthread, we can also dump everything related to
assigned state to the new threads.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6fb8f43c
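The model this commit moves to is that io-wq clones its workers from the task that owns the ring, so a worker starts out sharing the task's mm, files, fs and signal state instead of having that context re-attached piecemeal. A minimal sketch of that pattern in C, assuming a hypothetical io_worker_fn() and fork_io_worker() and an illustrative clone-flag set; the commit's actual creation path in fs/io-wq.c differs in detail:

#include <linux/sched.h>
#include <linux/sched/task.h>

/* hypothetical worker entry point: runs as a thread of the original task */
static int io_worker_fn(void *data)
{
	current->flags |= PF_IO_WORKER;	/* mark this thread as an io-wq helper */
	/* ... pull queued work items and run them with the task's own context ... */
	return 0;
}

/*
 * hypothetical creation helper: clone a worker that shares the caller's
 * fs, files, signal handlers and I/O context
 */
static pid_t fork_io_worker(void *data)
{
	unsigned long flags = CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
			      CLONE_THREAD | CLONE_IO;

	return kernel_thread(io_worker_fn, data, flags);
}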
@@ -137,6 +137,7 @@ static inline void io_wq_worker_running(struct task_struct *tsk)
 
 static inline bool io_wq_current_is_worker(void)
 {
-	return in_task() && (current->flags & PF_IO_WORKER);
+	return in_task() && (current->flags & PF_IO_WORKER) &&
+		current->pf_io_worker;
 }
 #endif
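With this change a task counts as an io-wq worker only if it both carries PF_IO_WORKER and has worker state attached via the new task_struct pf_io_worker pointer (added below). A hypothetical caller, purely to illustrate one way such a helper can be used to avoid blocking inline on a worker:

/* illustrative only: punt instead of waiting when already on an io-wq worker */
static int example_wait_or_punt(void)
{
	if (io_wq_current_is_worker())
		return -EAGAIN;	/* already executing async work: requeue, don't block */

	/* safe to wait inline from the submitting task */
	return 0;
}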
@@ -1633,6 +1633,9 @@ static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
 	struct io_kiocb *link = io_prep_linked_timeout(req);
 	struct io_uring_task *tctx = req->task->io_uring;
 
+	BUG_ON(!tctx);
+	BUG_ON(!tctx->io_wq);
+
 	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
 					&req->work, req->flags);
 	io_wq_enqueue(tctx->io_wq, &req->work);
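The two BUG_ON()s spell out the invariant behind the per-task model: any task that queues async work must already have an io_uring context (tctx) with its own io-wq attached. The same condition written as a predicate, with a made-up helper name:

/* hypothetical predicate mirroring the BUG_ON()s above */
static bool example_task_can_queue_async(void)
{
	struct io_uring_task *tctx = current->io_uring;

	return tctx && tctx->io_wq;
}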
@@ -9240,6 +9243,10 @@ static int io_uring_flush(struct file *file, void *data)
 	struct io_uring_task *tctx = current->io_uring;
 	struct io_ring_ctx *ctx = file->private_data;
 
+	/* Ignore helper thread files exit */
+	if (current->flags & PF_IO_WORKER)
+		return 0;
+
 	if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
 		io_uring_cancel_task_requests(ctx, NULL);
 		io_req_caches_free(ctx, current);
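The early return makes ->flush() a no-op when it runs on an io-wq helper thread, so a worker tearing down its files on exit does not trigger the cancellation path that is meant for the real task. The same guard, sketched on a hypothetical flush handler; only PF_IO_WORKER and the early-return pattern come from the patch:

static int example_flush(struct file *file, void *data)
{
	/* helper thread exit: nothing of its own to cancel */
	if (current->flags & PF_IO_WORKER)
		return 0;

	/* ... per-task cancellation and cleanup for real user tasks ... */
	return 0;
}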
@@ -895,6 +895,9 @@ struct task_struct {
 	/* CLONE_CHILD_CLEARTID: */
 	int __user			*clear_child_tid;
 
+	/* PF_IO_WORKER */
+	void				*pf_io_worker;
+
 	u64				utime;
 	u64				stime;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
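The new void pointer gives each PF_IO_WORKER thread somewhere to hang its io-wq worker state, which is exactly what the updated io_wq_current_is_worker() checks. A sketch of how a freshly forked worker might wire it up; the attach helper is an assumption, only the field and the flag come from the patch:

struct io_worker;	/* io-wq's per-worker state */

/* hypothetical attach step run by a newly forked worker thread */
static void example_io_worker_attach(struct io_worker *worker)
{
	current->pf_io_worker = worker;
	current->flags |= PF_IO_WORKER;
}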