Commit d60aa65b authored by Pavel Begunkov, committed by Jens Axboe

io_uring: merge CQ and poll waitqueues

->cq_wait and ->poll_wait are woken up in the same manner, so use a
single waitqueue for both of them. CQ waiters are queued exclusively,
so a wake-up will first go over all pollers, and that's what we need.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/00fe603e50000365774cf8435ef5fe03f049c1c9.1633373302.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent aede728a
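The safety of the merge rests on how the kernel orders entries on a waitqueue: poll_wait() ends up in add_wait_queue(), which inserts non-exclusive entries at the head of the list, while prepare_to_wait_exclusive(), used by CQ waiters, inserts at the tail, and __wake_up_common() walks the list from the head, stopping only once the requested number of exclusive waiters has been woken. The userspace model below is a sketch of that ordering only; the names (struct waiter, add_waiter, wake_up_nr) are hypothetical stand-ins, not kernel APIs.

/*
 * Userspace model of kernel waitqueue ordering (sketch, not kernel code):
 * non-exclusive entries go to the head (like add_wait_queue()), exclusive
 * entries to the tail (like add_wait_queue_entry_tail()), and a wake-up
 * walks from the head (like __wake_up_common()).
 */
#include <stdbool.h>
#include <stdio.h>

struct waiter {
	const char *name;
	bool exclusive;
	struct waiter *next;
};

static struct waiter *wq_head;

/* Non-exclusive add: head of the list. */
static void add_waiter(struct waiter *w)
{
	w->next = wq_head;
	wq_head = w;
}

/* Exclusive add: tail of the list. */
static void add_waiter_exclusive(struct waiter *w)
{
	struct waiter **p = &wq_head;

	w->exclusive = true;
	while (*p)
		p = &(*p)->next;
	w->next = NULL;
	*p = w;
}

/* Wake everyone up to nr_exclusive exclusive waiters. */
static void wake_up_nr(int nr_exclusive)
{
	for (struct waiter *w = wq_head; w; w = w->next) {
		printf("waking %s\n", w->name);
		if (w->exclusive && --nr_exclusive == 0)
			break;
	}
}

int main(void)
{
	struct waiter cq = { .name = "CQ waiter (io_uring_enter)" };
	struct waiter p1 = { .name = "poller 1" };
	struct waiter p2 = { .name = "poller 2" };

	add_waiter_exclusive(&cq);	/* queued first, still woken last */
	add_waiter(&p1);
	add_waiter(&p2);

	wake_up_nr(1);	/* pollers print first, then the CQ waiter */
	return 0;
}

Running this prints both pollers before the exclusive waiter, mirroring the commit's argument: because exclusive entries sit at the tail, any wake-up visits every poller before it can stop at an exclusive CQ waiter, so pollers are not starved by the merge.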
@@ -394,7 +394,6 @@ struct io_ring_ctx {
 	unsigned		cached_cq_tail;
 	unsigned		cq_entries;
 	struct eventfd_ctx	*cq_ev_fd;
-	struct wait_queue_head	poll_wait;
 	struct wait_queue_head	cq_wait;
 	unsigned		cq_extra;
 	atomic_t		cq_timeouts;
@@ -1300,7 +1299,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	ctx->flags = p->flags;
 	init_waitqueue_head(&ctx->sqo_sq_wait);
 	INIT_LIST_HEAD(&ctx->sqd_list);
-	init_waitqueue_head(&ctx->poll_wait);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	init_completion(&ctx->ref_comp);
 	xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
@@ -1621,8 +1619,6 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 	wake_up_all(&ctx->cq_wait);
 	if (io_should_trigger_evfd(ctx))
 		eventfd_signal(ctx->cq_ev_fd, 1);
-	if (waitqueue_active(&ctx->poll_wait))
-		wake_up_interruptible(&ctx->poll_wait);
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1636,8 +1632,6 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 	}
 	if (io_should_trigger_evfd(ctx))
 		eventfd_signal(ctx->cq_ev_fd, 1);
-	if (waitqueue_active(&ctx->poll_wait))
-		wake_up_interruptible(&ctx->poll_wait);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -9253,7 +9247,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	struct io_ring_ctx *ctx = file->private_data;
 	__poll_t mask = 0;
 
-	poll_wait(file, &ctx->poll_wait, wait);
+	poll_wait(file, &ctx->cq_wait, wait);
 	/*
 	 * synchronizes with barrier from wq_has_sleeper call in
 	 * io_commit_cqring
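The io_uring_poll() path changed by the last hunk is what userspace exercises when it polls the ring fd for CQ readiness; after this patch those sleepers share ctx->cq_wait with io_uring_enter() waiters. A minimal userspace sketch of that usage, assuming liburing is installed (illustrative only, not part of the patch):

/*
 * Poll the io_uring fd for CQ readiness.  The poll(2) sleeper and
 * io_uring_enter() CQ waiters now share ctx->cq_wait in the kernel.
 */
#include <liburing.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct pollfd pfd;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);		/* completes immediately */
	io_uring_submit(&ring);

	/* Sleep on the ring fd; the kernel queues us non-exclusively. */
	pfd.fd = ring.ring_fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
		io_uring_wait_cqe(&ring, &cqe);
		printf("cqe res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}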