Commit 7271ef3a authored by Jens Axboe

io_uring: fix recursive completion locking on overflow flush

syzbot reports a scenario where we recurse on the completion lock
when flushing an overflow:

1 lock held by syz-executor287/6816:
 #0: ffff888093cdb4d8 (&ctx->completion_lock){....}-{2:2}, at: io_cqring_overflow_flush+0xc6/0xab0 fs/io_uring.c:1333

stack backtrace:
CPU: 1 PID: 6816 Comm: syz-executor287 Not tainted 5.8.0-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
 __dump_stack lib/dump_stack.c:77 [inline]
 dump_stack+0x1f0/0x31e lib/dump_stack.c:118
 print_deadlock_bug kernel/locking/lockdep.c:2391 [inline]
 check_deadlock kernel/locking/lockdep.c:2432 [inline]
 validate_chain+0x69a4/0x88a0 kernel/locking/lockdep.c:3202
 __lock_acquire+0x1161/0x2ab0 kernel/locking/lockdep.c:4426
 lock_acquire+0x160/0x730 kernel/locking/lockdep.c:5005
 __raw_spin_lock_irq include/linux/spinlock_api_smp.h:128 [inline]
 _raw_spin_lock_irq+0x67/0x80 kernel/locking/spinlock.c:167
 spin_lock_irq include/linux/spinlock.h:379 [inline]
 io_queue_linked_timeout fs/io_uring.c:5928 [inline]
 __io_queue_async_work fs/io_uring.c:1192 [inline]
 __io_queue_deferred+0x36a/0x790 fs/io_uring.c:1237
 io_cqring_overflow_flush+0x774/0xab0 fs/io_uring.c:1359
 io_ring_ctx_wait_and_kill+0x2a1/0x570 fs/io_uring.c:7808
 io_uring_release+0x59/0x70 fs/io_uring.c:7829
 __fput+0x34f/0x7b0 fs/file_table.c:281
 task_work_run+0x137/0x1c0 kernel/task_work.c:135
 exit_task_work include/linux/task_work.h:25 [inline]
 do_exit+0x5f3/0x1f20 kernel/exit.c:806
 do_group_exit+0x161/0x2d0 kernel/exit.c:903
 __do_sys_exit_group+0x13/0x20 kernel/exit.c:914
 __se_sys_exit_group+0x10/0x10 kernel/exit.c:912
 __x64_sys_exit_group+0x37/0x40 kernel/exit.c:912
 do_syscall_64+0x31/0x70 arch/x86/entry/common.c:46
 entry_SYSCALL_64_after_hwframe+0x44/0xa9

Fix this by passing back the link from __io_queue_async_work(), and
then letting the caller handle the queueing of the link. Take care to
also punt the submission reference put to the caller, as we're holding
the completion lock for the __io_queue_deferred() case. Hence we need
to mark the io_kiocb appropriately for that case.
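
For illustration only, here is a minimal user-space sketch of the same
pattern (not the kernel code: queue_async_work(), flush_deferred(),
struct item and the pthread mutex standing in for completion_lock are
all invented for the example). The lower-level helper stops taking the
lock itself and instead hands the linked item back, and each caller
picks the variant that matches its locking context:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;

	struct item { int id; };

	/* Locked variant: caller must already hold completion_lock. */
	static void __queue_timeout(struct item *it)
	{
		printf("queue timeout for item %d (lock already held)\n", it->id);
	}

	/* Unlocked variant: takes the lock itself. */
	static void queue_timeout(struct item *it)
	{
		pthread_mutex_lock(&completion_lock);
		__queue_timeout(it);
		pthread_mutex_unlock(&completion_lock);
	}

	/* Enqueue the work, but no longer queue the timeout here; return
	 * the linked item so the caller can queue it appropriately. */
	static struct item *queue_async_work(struct item *it)
	{
		printf("enqueue work for item %d\n", it->id);
		return it;
	}

	/* Flush path that already runs under completion_lock: must use
	 * the locked variant instead of re-acquiring the mutex. */
	static void flush_deferred(struct item *it)
	{
		struct item *link;

		pthread_mutex_lock(&completion_lock);
		link = queue_async_work(it);
		if (link)
			__queue_timeout(link);	/* lock held: no recursion */
		pthread_mutex_unlock(&completion_lock);
	}

	int main(void)
	{
		struct item a = { .id = 1 }, b = { .id = 2 };
		struct item *link;

		/* normal submission path: lock not held, locking variant is fine */
		link = queue_async_work(&a);
		if (link)
			queue_timeout(link);

		/* deferred-flush path: runs under the lock throughout */
		flush_deferred(&b);
		return 0;
	}

The kernel change below follows the same split: io_queue_async_work()
calls the locking io_queue_linked_timeout(), while __io_queue_deferred(),
which already holds ctx->completion_lock, calls __io_queue_linked_timeout()
and marks the request REQ_F_COMP_LOCKED before dropping the submission
reference.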

Reported-by: syzbot+996f91b6ec3812c48042@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0ba9c9ed
fs/io_uring.c
@@ -898,6 +898,7 @@ static void io_put_req(struct io_kiocb *req);
 static void io_double_put_req(struct io_kiocb *req);
 static void __io_double_put_req(struct io_kiocb *req);
 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
+static void __io_queue_linked_timeout(struct io_kiocb *req);
 static void io_queue_linked_timeout(struct io_kiocb *req);
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 				 struct io_uring_files_update *ip,
@@ -1179,7 +1180,7 @@ static void io_prep_async_link(struct io_kiocb *req)
 		io_prep_async_work(cur);
 }
 
-static void __io_queue_async_work(struct io_kiocb *req)
+static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_kiocb *link = io_prep_linked_timeout(req);
@@ -1187,16 +1188,19 @@ static void __io_queue_async_work(struct io_kiocb *req)
 	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
 					&req->work, req->flags);
 	io_wq_enqueue(ctx->io_wq, &req->work);
-
-	if (link)
-		io_queue_linked_timeout(link);
+	return link;
 }
 
 static void io_queue_async_work(struct io_kiocb *req)
 {
+	struct io_kiocb *link;
+
 	/* init ->work of the whole link before punting */
 	io_prep_async_link(req);
-	__io_queue_async_work(req);
+	link = __io_queue_async_work(req);
+
+	if (link)
+		io_queue_linked_timeout(link);
 }
 
 static void io_kill_timeout(struct io_kiocb *req)
@@ -1229,12 +1233,19 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 	do {
 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
 						struct io_defer_entry, list);
+		struct io_kiocb *link;
 
 		if (req_need_defer(de->req, de->seq))
 			break;
 		list_del_init(&de->list);
 		/* punt-init is done before queueing for defer */
-		__io_queue_async_work(de->req);
+		link = __io_queue_async_work(de->req);
+		if (link) {
+			__io_queue_linked_timeout(link);
+			/* drop submission reference */
+			link->flags |= REQ_F_COMP_LOCKED;
+			io_put_req(link);
+		}
 		kfree(de);
 	} while (!list_empty(&ctx->defer_list));
 }
@@ -5939,15 +5950,12 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static void io_queue_linked_timeout(struct io_kiocb *req)
+static void __io_queue_linked_timeout(struct io_kiocb *req)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
 	/*
 	 * If the list is now empty, then our linked request finished before
 	 * we got a chance to setup the timer
 	 */
-	spin_lock_irq(&ctx->completion_lock);
 	if (!list_empty(&req->link_list)) {
 		struct io_timeout_data *data = &req->io->timeout;
 
@@ -5955,6 +5963,14 @@ static void io_queue_linked_timeout(struct io_kiocb *req)
 		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
 				data->mode);
 	}
+}
+
+static void io_queue_linked_timeout(struct io_kiocb *req)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	spin_lock_irq(&ctx->completion_lock);
+	__io_queue_linked_timeout(req);
 	spin_unlock_irq(&ctx->completion_lock);
 
 	/* drop submission reference */