Commit 8d09a88e authored by Pavel Begunkov, committed by Jens Axboe

io_uring: always lock __io_cqring_overflow_flush

Conditional locking is never great; in the case of
__io_cqring_overflow_flush(), which is a slow path, it's not justified.
Don't handle IOPOLL separately; always grab uring_lock for overflow
flushing.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/162947df299aa12693ac4b305dacedab32ec7976.1712708261.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 408024b9
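
As context for the change (not part of the commit): below is a minimal userspace sketch of the pattern the patch moves to, using a pthread mutex in place of the kernel's uring_lock and a simple held flag in place of lockdep, since lockdep_assert_held() has no pthread equivalent. All names in the sketch are illustrative, not taken from io_uring.

/* Hedged sketch: the caller locks unconditionally and the callee
 * asserts the locking invariant. Userspace analogue only. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;
static bool uring_lock_held;	/* crude stand-in for lockdep state */

/* Mirrors the added lockdep_assert_held(&ctx->uring_lock): the
 * locking requirement is checked up front instead of being
 * re-derived from ring flags at every call site. */
static void overflow_flush(void)
{
	assert(uring_lock_held);
	puts("flushing overflow CQEs under uring_lock");
}

/* Mirrors the patched io_cqring_do_overflow_flush(): the
 * IOPOLL-only conditional locking is gone; the slow path simply
 * always takes the lock. */
static void do_overflow_flush(void)
{
	pthread_mutex_lock(&uring_lock);
	uring_lock_held = true;
	overflow_flush();
	uring_lock_held = false;
	pthread_mutex_unlock(&uring_lock);
}

int main(void)
{
	do_overflow_flush();
	return 0;
}

The trade-off matches the commit message: an uncontended mutex on a slow path is cheap, and unconditional locking lets the callee state its requirement with a single assertion instead of every caller branching on IORING_SETUP_IOPOLL.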
io_uring/io_uring.c
@@ -673,6 +673,8 @@ static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
 	struct io_overflow_cqe *ocqe;
 	LIST_HEAD(list);
 
+	lockdep_assert_held(&ctx->uring_lock);
+
 	spin_lock(&ctx->completion_lock);
 	list_splice_init(&ctx->cq_overflow_list, &list);
 	clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
@@ -689,6 +691,8 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 {
 	size_t cqe_size = sizeof(struct io_uring_cqe);
 
+	lockdep_assert_held(&ctx->uring_lock);
+
 	if (__io_cqring_events(ctx) == ctx->cq_entries)
 		return;
 
@@ -718,12 +722,9 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 
 static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
 {
-	/* iopoll syncs against uring_lock, not completion_lock */
-	if (ctx->flags & IORING_SETUP_IOPOLL)
-		mutex_lock(&ctx->uring_lock);
+	mutex_lock(&ctx->uring_lock);
 	__io_cqring_overflow_flush(ctx);
-	if (ctx->flags & IORING_SETUP_IOPOLL)
-		mutex_unlock(&ctx->uring_lock);
+	mutex_unlock(&ctx->uring_lock);
 }
 
 /* can be called by any task */
@@ -1522,6 +1523,8 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 	unsigned int nr_events = 0;
 	unsigned long check_cq;
 
+	lockdep_assert_held(&ctx->uring_lock);
+
 	if (!io_allowed_run_tw(ctx))
 		return -EEXIST;
...