Commit 3f48cf18 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: unify files and task cancel

Now __io_uring_cancel() and __io_uring_files_cancel() are very similar
and mostly differ in how we count requests, so merge them and let
tctx_inflight() handle the counting.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1a5986a97df4dc1378f3fe0ca1eb483dbcf42112.1618101759.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b303fe2e
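
Before this patch, the exec path (__io_uring_task_cancel()) and the task-exit path (__io_uring_files_cancel()) ran two near-identical drain loops that differed only in which inflight counter they polled. The patch folds both into a single __io_uring_cancel(struct files_struct *files), with a bool tracked argument on tctx_inflight() selecting the counter; the second group of hunks updates the declarations and inline wrappers in the io_uring header to match. The following minimal userspace sketch models that refactoring pattern only; it is not kernel code, and all names and the counter bookkeeping are illustrative stand-ins:

/*
 * Illustrative userspace sketch (NOT kernel code): models the refactor
 * above, where two near-duplicate drain loops become one loop
 * parameterized by a "tracked" flag. All names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct tctx_model {
	long inflight;		/* models percpu_counter_sum(&tctx->inflight) */
	long inflight_tracked;	/* models atomic_read(&tctx->inflight_tracked) */
};

/* Post-patch shape: one helper, the flag picks which count to report. */
static long model_inflight(const struct tctx_model *t, bool tracked)
{
	if (tracked)
		return t->inflight_tracked;
	return t->inflight;
}

/* Post-patch shape: one cancel loop instead of two copies. */
static void model_cancel(struct tctx_model *t, bool tracked)
{
	long inflight;

	while ((inflight = model_inflight(t, tracked)) != 0) {
		/* stand-in for io_uring_try_cancel() making progress */
		if (t->inflight_tracked > 0)
			t->inflight_tracked--;
		t->inflight--;
		printf("inflight was %ld (tracked=%d)\n", inflight, tracked);
	}
}

int main(void)
{
	struct tctx_model t = { .inflight = 3, .inflight_tracked = 1 };

	model_cancel(&t, true);		/* old files-cancel behaviour */
	model_cancel(&t, false);	/* old task-cancel behaviour */
	return 0;
}
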
@@ -8920,13 +8920,10 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
 	}
 }
 
-static s64 tctx_inflight_tracked(struct io_uring_task *tctx)
-{
-	return atomic_read(&tctx->inflight_tracked);
-}
-
-static s64 tctx_inflight(struct io_uring_task *tctx)
+static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
 {
+	if (tracked)
+		return atomic_read(&tctx->inflight_tracked);
 	return percpu_counter_sum(&tctx->inflight);
 }
@@ -8993,7 +8990,7 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
 	atomic_inc(&tctx->in_idle);
 	do {
 		/* read completions before cancelations */
-		inflight = tctx_inflight(tctx);
+		inflight = tctx_inflight(tctx, false);
 		if (!inflight)
 			break;
 		io_uring_try_cancel_requests(ctx, current, NULL);
@@ -9004,43 +9001,18 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
 		 * avoids a race where a completion comes in before we did
 		 * prepare_to_wait().
 		 */
-		if (inflight == tctx_inflight(tctx))
+		if (inflight == tctx_inflight(tctx, false))
 			schedule();
 		finish_wait(&tctx->wait, &wait);
 	} while (1);
 	atomic_dec(&tctx->in_idle);
 }
 
-void __io_uring_files_cancel(struct files_struct *files)
-{
-	struct io_uring_task *tctx = current->io_uring;
-	DEFINE_WAIT(wait);
-	s64 inflight;
-
-	/* make sure overflow events are dropped */
-	atomic_inc(&tctx->in_idle);
-	do {
-		/* read completions before cancelations */
-		inflight = tctx_inflight_tracked(tctx);
-		if (!inflight)
-			break;
-		io_uring_try_cancel(files);
-
-		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
-		if (inflight == tctx_inflight_tracked(tctx))
-			schedule();
-		finish_wait(&tctx->wait, &wait);
-	} while (1);
-	atomic_dec(&tctx->in_idle);
-
-	io_uring_clean_tctx(tctx);
-}
-
 /*
  * Find any io_uring fd that this task has registered or done IO on, and cancel
  * requests.
  */
-void __io_uring_task_cancel(void)
+void __io_uring_cancel(struct files_struct *files)
 {
 	struct io_uring_task *tctx = current->io_uring;
 	DEFINE_WAIT(wait);
@@ -9048,15 +9020,14 @@ void __io_uring_task_cancel(void)
 
 	/* make sure overflow events are dropped */
 	atomic_inc(&tctx->in_idle);
-	__io_uring_files_cancel(NULL);
+	io_uring_try_cancel(files);
 	do {
 		/* read completions before cancelations */
-		inflight = tctx_inflight(tctx);
+		inflight = tctx_inflight(tctx, !!files);
 		if (!inflight)
 			break;
-		io_uring_try_cancel(NULL);
+		io_uring_try_cancel(files);
 		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
 
 		/*
@@ -9064,16 +9035,17 @@ void __io_uring_task_cancel(void)
 		 * avoids a race where a completion comes in before we did
 		 * prepare_to_wait().
 		 */
-		if (inflight == tctx_inflight(tctx))
+		if (inflight == tctx_inflight(tctx, !!files))
 			schedule();
 		finish_wait(&tctx->wait, &wait);
 	} while (1);
 	atomic_dec(&tctx->in_idle);
 
 	io_uring_clean_tctx(tctx);
-	/* all current's requests should be gone, we can kill tctx */
-	__io_uring_free(current);
+	if (!files) {
+		/* for exec all current's requests should be gone, kill tctx */
+		__io_uring_free(current);
+	}
 }
 
 static void *io_uring_validate_mmap_request(struct file *file,
@@ -7,19 +7,17 @@
 #if defined(CONFIG_IO_URING)
 struct sock *io_uring_get_socket(struct file *file);
-void __io_uring_task_cancel(void);
-void __io_uring_files_cancel(struct files_struct *files);
+void __io_uring_cancel(struct files_struct *files);
 void __io_uring_free(struct task_struct *tsk);
 
-static inline void io_uring_task_cancel(void)
+static inline void io_uring_files_cancel(struct files_struct *files)
 {
 	if (current->io_uring)
-		__io_uring_task_cancel();
+		__io_uring_cancel(files);
 }
-static inline void io_uring_files_cancel(struct files_struct *files)
+static inline void io_uring_task_cancel(void)
 {
-	if (current->io_uring)
-		__io_uring_files_cancel(files);
+	return io_uring_files_cancel(NULL);
 }
 static inline void io_uring_free(struct task_struct *tsk)
 {
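
Taken together: a non-NULL files argument makes __io_uring_cancel() poll only the tracked-request count, preserving the old __io_uring_files_cancel() behaviour, while files == NULL polls all inflight requests and, per the final hunk of the cancel path, also frees the task's io_uring context, matching the old __io_uring_task_cancel() behaviour on exec. The rewritten inline wrappers keep both existing entry points working unchanged: io_uring_files_cancel(files) forwards to __io_uring_cancel(files), and io_uring_task_cancel() is now just io_uring_files_cancel(NULL).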