Commit 8d664282 authored by Jens Axboe

io_uring: rename 'in_idle' to 'in_cancel'

This better describes what it does - it's incremented when the task is
currently undergoing a cancelation operation, due to exiting or exec'ing.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ce8e04f6
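For readers skimming the diff below: the counter is a cancellation gate, not an idle flag. Only the owning task raises or lowers it, while completion paths merely peek at it to decide whether the cancelling task needs a wake-up. A minimal userspace sketch of that pattern (illustrative only; the `tctx_demo` names and helpers are invented here and are not kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Illustrative stand-in for struct io_uring_task (hypothetical names). */
	struct tctx_demo {
		atomic_int in_cancel;	/* raised only by the owning task */
		atomic_int inflight;	/* requests still in flight */
	};

	/* Owning task: start of a cancellation pass (cf. io_uring_cancel_generic). */
	static void cancel_begin(struct tctx_demo *t)
	{
		atomic_fetch_add(&t->in_cancel, 1);
	}

	/*
	 * Completion path, possibly run by another thread (cf. io_put_task_remote):
	 * a relaxed read is fine because the value is only a wake-up hint; missing
	 * a concurrent transition just delays the wake-up, it never loses a request.
	 */
	static bool completion_should_wake(struct tctx_demo *t)
	{
		return atomic_load_explicit(&t->in_cancel, memory_order_relaxed) != 0;
	}

	/*
	 * Owning task, exec path only: cancellation is done, drop the flag again.
	 * On normal exit the flag is deliberately left set, mirroring the comment
	 * in the io_uring_cancel_generic() hunk of the diff.
	 */
	static void cancel_end_for_exec(struct tctx_demo *t)
	{
		atomic_fetch_sub(&t->in_cancel, 1);
	}

Under the old name, none of these call sites read as cancellation logic, which is what the rename fixes.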
@@ -58,7 +58,7 @@ struct io_uring_task {
 	struct xarray		xa;
 	struct wait_queue_head	wait;
-	atomic_t		in_idle;
+	atomic_t		in_cancel;
 	atomic_t		inflight_tracked;
 	struct percpu_counter	inflight;

@@ -719,7 +719,7 @@ static void io_put_task_remote(struct task_struct *task, int nr)
 	struct io_uring_task *tctx = task->io_uring;
 	percpu_counter_sub(&tctx->inflight, nr);
-	if (unlikely(atomic_read(&tctx->in_idle)))
+	if (unlikely(atomic_read(&tctx->in_cancel)))
 		wake_up(&tctx->wait);
 	put_task_struct_many(task, nr);
 }

@@ -1258,8 +1258,8 @@ void tctx_task_work(struct callback_head *cb)
 	ctx_flush_and_put(ctx, &uring_locked);
-	/* relaxed read is enough as only the task itself sets ->in_idle */
-	if (unlikely(atomic_read(&tctx->in_idle)))
+	/* relaxed read is enough as only the task itself sets ->in_cancel */
+	if (unlikely(atomic_read(&tctx->in_cancel)))
 		io_uring_drop_tctx_refs(current);
 	trace_io_uring_task_work_run(tctx, count, loops);

@@ -1291,7 +1291,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
 	/* needed for the following wake up */
 	smp_mb__after_atomic();
-	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
+	if (unlikely(atomic_read(&req->task->io_uring->in_cancel))) {
 		io_move_task_work_from_local(ctx);
 		goto put_ref;
 	}

@@ -2937,12 +2937,12 @@ static __cold void io_tctx_exit_cb(struct callback_head *cb)
 	work = container_of(cb, struct io_tctx_exit, task_work);
 	/*
-	 * When @in_idle, we're in cancellation and it's racy to remove the
+	 * When @in_cancel, we're in cancellation and it's racy to remove the
 	 * node. It'll be removed by the end of cancellation, just ignore it.
 	 * tctx can be NULL if the queueing of this task_work raced with
 	 * work cancelation off the exec path.
 	 */
-	if (tctx && !atomic_read(&tctx->in_idle))
+	if (tctx && !atomic_read(&tctx->in_cancel))
 		io_uring_del_tctx_node((unsigned long)work->ctx);
 	complete(&work->completion);
 }

@@ -3210,7 +3210,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 	if (tctx->io_wq)
 		io_wq_exit_start(tctx->io_wq);
-	atomic_inc(&tctx->in_idle);
+	atomic_inc(&tctx->in_cancel);
 	do {
 		bool loop = false;

@@ -3261,9 +3261,9 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 	if (cancel_all) {
 		/*
 		 * We shouldn't run task_works after cancel, so just leave
-		 * ->in_idle set for normal exit.
+		 * ->in_cancel set for normal exit.
 		 */
-		atomic_dec(&tctx->in_idle);
+		atomic_dec(&tctx->in_cancel);
 		/* for exec all current's requests should be gone, kill tctx */
 		__io_uring_free(current);
 	}

@@ -83,7 +83,7 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
 	xa_init(&tctx->xa);
 	init_waitqueue_head(&tctx->wait);
-	atomic_set(&tctx->in_idle, 0);
+	atomic_set(&tctx->in_cancel, 0);
 	atomic_set(&tctx->inflight_tracked, 0);
 	task->io_uring = tctx;
 	init_llist_head(&tctx->task_list);
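The reason the completion path checks the flag at all is the drain handshake in io_uring_cancel_generic(): the task marks that cancellation is in progress, then sleeps on tctx->wait until its inflight count reaches zero, and io_put_task_remote() only bothers waking that queue while cancellation is underway. A rough pthread-based analogue of just that handshake, using invented demo_* names (a sketch of the idea, not the kernel implementation, and omitting the active cancellation loop the real code runs while waiting):

	#include <pthread.h>
	#include <stdatomic.h>

	/* Hypothetical analogue of the tctx wait/wake handshake. */
	struct demo_tctx {
		pthread_mutex_t lock;
		pthread_cond_t wait;	/* plays the role of tctx->wait */
		atomic_int in_cancel;
		int inflight;		/* plays the role of tctx->inflight */
	};

	/*
	 * Any thread completing a request (cf. io_put_task_remote): drop the
	 * inflight count and wake the canceller only if one is actually waiting.
	 */
	static void demo_put_request(struct demo_tctx *t)
	{
		pthread_mutex_lock(&t->lock);
		t->inflight--;
		if (atomic_load_explicit(&t->in_cancel, memory_order_relaxed))
			pthread_cond_signal(&t->wait);
		pthread_mutex_unlock(&t->lock);
	}

	/*
	 * The owning task (cf. io_uring_cancel_generic): mark cancellation in
	 * progress, then sleep until every in-flight request has completed.
	 */
	static void demo_cancel_and_wait(struct demo_tctx *t)
	{
		atomic_fetch_add(&t->in_cancel, 1);

		pthread_mutex_lock(&t->lock);
		while (t->inflight > 0)
			pthread_cond_wait(&t->wait, &t->lock);
		pthread_mutex_unlock(&t->lock);
	}

Keeping the wake-up conditional on the flag means the common completion path pays only a relaxed read while no cancellation is running.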