Commit 40d8ddd4 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: use completion list for CQ overflow

As with the completion path, also use compl.list for overflowed
requests. If cleaned up properly, nobody needs per-op data there
anymore.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d21ffe7e
@@ -1339,8 +1339,8 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
break; break;
req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb, req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
list); compl.list);
list_move(&req->list, &list); list_move(&req->compl.list, &list);
req->flags &= ~REQ_F_OVERFLOW; req->flags &= ~REQ_F_OVERFLOW;
if (cqe) { if (cqe) {
WRITE_ONCE(cqe->user_data, req->user_data); WRITE_ONCE(cqe->user_data, req->user_data);
@@ -1362,8 +1362,8 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
io_cqring_ev_posted(ctx); io_cqring_ev_posted(ctx);
while (!list_empty(&list)) { while (!list_empty(&list)) {
req = list_first_entry(&list, struct io_kiocb, list); req = list_first_entry(&list, struct io_kiocb, compl.list);
list_del(&req->list); list_del(&req->compl.list);
io_put_req(req); io_put_req(req);
} }
@@ -1396,11 +1396,12 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
set_bit(0, &ctx->cq_check_overflow); set_bit(0, &ctx->cq_check_overflow);
ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW; ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
} }
io_clean_op(req);
req->flags |= REQ_F_OVERFLOW; req->flags |= REQ_F_OVERFLOW;
refcount_inc(&req->refs);
req->result = res; req->result = res;
req->cflags = cflags; req->cflags = cflags;
list_add_tail(&req->list, &ctx->cq_overflow_list); refcount_inc(&req->refs);
list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
} }
} }
@@ -7835,7 +7836,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
if (cancel_req->flags & REQ_F_OVERFLOW) { if (cancel_req->flags & REQ_F_OVERFLOW) {
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
list_del(&cancel_req->list); list_del(&cancel_req->compl.list);
cancel_req->flags &= ~REQ_F_OVERFLOW; cancel_req->flags &= ~REQ_F_OVERFLOW;
if (list_empty(&ctx->cq_overflow_list)) { if (list_empty(&ctx->cq_overflow_list)) {
clear_bit(0, &ctx->sq_check_overflow); clear_bit(0, &ctx->sq_check_overflow);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment