Commit 0453aad6 authored by Pavel Begunkov, committed by Jens Axboe

io_uring/io-wq: limit retrying worker initialisation

If io-wq worker creation fails, we retry it by queueing up a task_work.
A task_work is needed because the retry has to be done from the user
process context. The problem is that retries are not limited, and if
queueing a task_work is the reason for the failure, we might get into an
infinite loop.

It doesn't seem to happen now, but it would with the following patch,
which executes task_work in the freezer's loop. For now, arbitrarily
limit the number of attempts to create a worker.

Cc: stable@vger.kernel.org
Fixes: 3146cba9 ("io-wq: make worker creation resilient against signals")
Reported-by: Julian Orth <ju.orth@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8280436925db88448c7c85c6656edee1a43029ea.1720634146.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f7c696a5
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -23,6 +23,7 @@
 #include "io_uring.h"
 
 #define WORKER_IDLE_TIMEOUT	(5 * HZ)
+#define WORKER_INIT_LIMIT	3
 
 enum {
 	IO_WORKER_F_UP		= 0,	/* up and active */
@@ -58,6 +59,7 @@ struct io_worker {
 
 	unsigned long create_state;
 	struct callback_head create_work;
+	int init_retries;
 
 	union {
 		struct rcu_head rcu;
@@ -745,7 +747,7 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
 	return true;
 }
 
-static inline bool io_should_retry_thread(long err)
+static inline bool io_should_retry_thread(struct io_worker *worker, long err)
 {
 	/*
 	 * Prevent perpetual task_work retry, if the task (or its group) is
@@ -753,6 +755,8 @@ static inline bool io_should_retry_thread(long err)
 	 */
 	if (fatal_signal_pending(current))
 		return false;
+	if (worker->init_retries++ >= WORKER_INIT_LIMIT)
+		return false;
 
 	switch (err) {
 	case -EAGAIN:
@@ -779,7 +783,7 @@ static void create_worker_cont(struct callback_head *cb)
 		io_init_new_worker(wq, worker, tsk);
 		io_worker_release(worker);
 		return;
-	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
+	} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
 		struct io_wq_acct *acct = io_wq_get_acct(worker);
 
 		atomic_dec(&acct->nr_running);
@@ -846,7 +850,7 @@ static bool create_io_worker(struct io_wq *wq, int index)
 	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
 	if (!IS_ERR(tsk)) {
 		io_init_new_worker(wq, worker, tsk);
-	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
+	} else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
 		kfree(worker);
 		goto fail;
 	} else {
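For illustration only, here is a minimal userspace C sketch of the bounded-retry pattern the patch introduces. It is not part of the patch; RETRY_LIMIT, fake_create_thread and should_retry are hypothetical stand-ins for WORKER_INIT_LIMIT, create_io_thread and io_should_retry_thread.

/*
 * Minimal sketch of capping worker-creation retries.
 * Assumes creation always fails with -EAGAIN to show the cut-off.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define RETRY_LIMIT 3			/* mirrors WORKER_INIT_LIMIT */

struct worker {
	int init_retries;		/* mirrors io_worker::init_retries */
};

/* Stand-in for thread creation that persistently fails. */
static long fake_create_thread(void)
{
	return -EAGAIN;
}

/*
 * Decide whether to retry, capping the number of attempts so a
 * persistently failing creation path cannot loop forever.
 */
static bool should_retry(struct worker *worker, long err)
{
	if (worker->init_retries++ >= RETRY_LIMIT)
		return false;
	return err == -EAGAIN;
}

int main(void)
{
	struct worker worker = { 0 };
	long err;

	for (;;) {
		err = fake_create_thread();
		if (err == 0) {
			printf("worker created\n");
			break;
		}
		if (!should_retry(&worker, err)) {
			printf("giving up after %d attempts\n",
			       worker.init_retries);
			break;
		}
	}
	return 0;
}

With the counter capped at 3, the loop stops after the initial attempt plus three retries ("giving up after 4 attempts") instead of requeueing the retry indefinitely, which is the behaviour the patch enforces for io-wq worker creation.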