Commit f3606e3a authored by Jens Axboe's avatar Jens Axboe

io_uring: allow timeout/poll/files killing to take task into account

We currently cancel these when the ring exits, and we cancel all of
them. This is in preparation for killing only the ones associated
with a given task.
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0f078896
...@@ -1226,13 +1226,26 @@ static void io_kill_timeout(struct io_kiocb *req) ...@@ -1226,13 +1226,26 @@ static void io_kill_timeout(struct io_kiocb *req)
} }
} }
static void io_kill_timeouts(struct io_ring_ctx *ctx) static bool io_task_match(struct io_kiocb *req, struct task_struct *tsk)
{
struct io_ring_ctx *ctx = req->ctx;
if (!tsk || req->task == tsk)
return true;
if ((ctx->flags & IORING_SETUP_SQPOLL) && req->task == ctx->sqo_thread)
return true;
return false;
}
static void io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk)
{ {
struct io_kiocb *req, *tmp; struct io_kiocb *req, *tmp;
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
io_kill_timeout(req); if (io_task_match(req, tsk))
io_kill_timeout(req);
}
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
} }
...@@ -5017,7 +5030,7 @@ static bool io_poll_remove_one(struct io_kiocb *req) ...@@ -5017,7 +5030,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)
return do_complete; return do_complete;
} }
static void io_poll_remove_all(struct io_ring_ctx *ctx) static void io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk)
{ {
struct hlist_node *tmp; struct hlist_node *tmp;
struct io_kiocb *req; struct io_kiocb *req;
...@@ -5028,8 +5041,10 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx) ...@@ -5028,8 +5041,10 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
struct hlist_head *list; struct hlist_head *list;
list = &ctx->cancel_hash[i]; list = &ctx->cancel_hash[i];
hlist_for_each_entry_safe(req, tmp, list, hash_node) hlist_for_each_entry_safe(req, tmp, list, hash_node) {
posted += io_poll_remove_one(req); if (io_task_match(req, tsk))
posted += io_poll_remove_one(req);
}
} }
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
...@@ -7989,8 +8004,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) ...@@ -7989,8 +8004,8 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
percpu_ref_kill(&ctx->refs); percpu_ref_kill(&ctx->refs);
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
io_kill_timeouts(ctx); io_kill_timeouts(ctx, NULL);
io_poll_remove_all(ctx); io_poll_remove_all(ctx, NULL);
if (ctx->io_wq) if (ctx->io_wq)
io_wq_cancel_all(ctx->io_wq); io_wq_cancel_all(ctx->io_wq);
...@@ -8221,7 +8236,7 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data) ...@@ -8221,7 +8236,7 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct task_struct *task = data; struct task_struct *task = data;
return req->task == task; return io_task_match(req, task);
} }
static int io_uring_flush(struct file *file, void *data) static int io_uring_flush(struct file *file, void *data)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment