Commit 1f8d5bbe authored by Pavel Begunkov, committed by Jens Axboe

io_uring/iopoll: unify tw breaking logic

Keep the checks for whether to break out of the iopoll loop the same for
normal and deferred tw; this includes the ->cached_cq_tail checks that
guard against polling for more completions than were asked for.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d2fa8a44f8114f55a4807528da438cde93815360.1662652536.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9d54bd6a
io_uring/io_uring.c
@@ -1428,22 +1428,22 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 		 */
 		if (wq_list_empty(&ctx->iopoll_list) ||
 		    io_task_work_pending(ctx)) {
+			u32 tail = ctx->cached_cq_tail;
+
 			if (!llist_empty(&ctx->work_llist))
 				__io_run_local_work(ctx, true);
+
 			if (task_work_pending(current) ||
 			    wq_list_empty(&ctx->iopoll_list)) {
-				u32 tail = ctx->cached_cq_tail;
-
 				mutex_unlock(&ctx->uring_lock);
 				io_run_task_work();
 				mutex_lock(&ctx->uring_lock);
-
-				/* some requests don't go through iopoll_list */
-				if (tail != ctx->cached_cq_tail ||
-				    wq_list_empty(&ctx->iopoll_list))
-					break;
 			}
+			/* some requests don't go through iopoll_list */
+			if (tail != ctx->cached_cq_tail ||
+			    wq_list_empty(&ctx->iopoll_list))
+				break;
 		}
 		ret = io_do_iopoll(ctx, !min);
 		if (ret < 0)
 			break;
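To see why hoisting the ->cached_cq_tail snapshot out of the inner block matters, here is a minimal userspace sketch of the pattern after the patch. Everything here (struct fake_ctx, run_local_work(), run_task_work(), iopoll_loop()) is an illustrative stand-in invented for this demo, not a kernel API; only the field names mirror the hunk. It models a loop that snapshots the tail once, runs whichever flavour of task work is pending, and then applies a single break check for both paths.

#include <stdio.h>

/* Illustrative stand-in for struct io_ring_ctx; made up for this demo. */
struct fake_ctx {
	unsigned int cached_cq_tail;	/* completions posted so far */
	int iopoll_entries;		/* stand-in for ctx->iopoll_list */
	int local_work;			/* stand-in for ctx->work_llist */
	int task_work;			/* stand-in for task_work_pending() */
};

/* Both flavours of task work may post completions, moving the CQ tail. */
static void run_local_work(struct fake_ctx *ctx)
{
	ctx->cached_cq_tail += ctx->local_work;
	ctx->local_work = 0;
}

static void run_task_work(struct fake_ctx *ctx)
{
	ctx->cached_cq_tail += ctx->task_work;
	ctx->task_work = 0;
}

static void iopoll_loop(struct fake_ctx *ctx, unsigned int min)
{
	unsigned int done = 0;

	while (done < min) {
		if (!ctx->iopoll_entries || ctx->local_work || ctx->task_work) {
			/* One snapshot, taken before either tw flavour runs. */
			unsigned int tail = ctx->cached_cq_tail;

			if (ctx->local_work)
				run_local_work(ctx);	/* deferred tw */
			if (ctx->task_work)
				run_task_work(ctx);	/* normal tw */

			/*
			 * The unified break check: some requests complete
			 * via tw without ever sitting in the iopoll list,
			 * so stop if the tail moved or there is nothing
			 * left to poll.
			 */
			if (tail != ctx->cached_cq_tail || !ctx->iopoll_entries)
				break;
		}
		/* Reap one completion off the (fake) iopoll list. */
		ctx->iopoll_entries--;
		ctx->cached_cq_tail++;
		done++;
	}
	printf("stopped at tail=%u, %u reaped by polling\n",
	       ctx->cached_cq_tail, done);
}

int main(void)
{
	/* tw posts completions first: the loop breaks, no over-polling */
	struct fake_ctx a = { .iopoll_entries = 4, .local_work = 2 };
	/* no tw pending: the loop polls until min completions are reaped */
	struct fake_ctx b = { .iopoll_entries = 4 };

	iopoll_loop(&a, 3);
	iopoll_loop(&b, 3);
	return 0;
}

With the snapshot taken before either path runs, completions posted by the deferred (work_llist) path now terminate the loop exactly like those posted by normal task_work, which is the unification the commit message describes.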