Commit f43de1f8 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: unite fill_cqe and the 32B version

We want just one function that will handle both normal cqes and 32B
cqes. Combine __io_fill_cqe_req() and __io_fill_cqe_req32(). It's still
not entirely correct yet, but saves us from cases when we fill a CQE of
a wrong size.

Fixes: 76c68fbf ("io_uring: enable CQE32")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8085c5b2f74141520f60decd45334f87e389b718.1655287457.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 91ef75a7
...@@ -2469,6 +2469,7 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx, ...@@ -2469,6 +2469,7 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
{ {
struct io_uring_cqe *cqe; struct io_uring_cqe *cqe;
if (!(ctx->flags & IORING_SETUP_CQE32)) {
trace_io_uring_complete(req->ctx, req, req->cqe.user_data, trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
req->cqe.res, req->cqe.flags, 0, 0); req->cqe.res, req->cqe.flags, 0, 0);
...@@ -2482,8 +2483,34 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx, ...@@ -2482,8 +2483,34 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
memcpy(cqe, &req->cqe, sizeof(*cqe)); memcpy(cqe, &req->cqe, sizeof(*cqe));
return true; return true;
} }
return io_cqring_event_overflow(ctx, req->cqe.user_data, return io_cqring_event_overflow(ctx, req->cqe.user_data,
req->cqe.res, req->cqe.flags, 0, 0); req->cqe.res, req->cqe.flags,
0, 0);
} else {
u64 extra1 = req->extra1;
u64 extra2 = req->extra2;
trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
req->cqe.res, req->cqe.flags, extra1, extra2);
/*
* If we can't get a cq entry, userspace overflowed the
* submission (by quite a lot). Increment the overflow count in
* the ring.
*/
cqe = io_get_cqe(ctx);
if (likely(cqe)) {
memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
WRITE_ONCE(cqe->big_cqe[0], extra1);
WRITE_ONCE(cqe->big_cqe[1], extra2);
return true;
}
return io_cqring_event_overflow(ctx, req->cqe.user_data,
req->cqe.res, req->cqe.flags,
extra1, extra2);
}
} }
static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx, static inline bool __io_fill_cqe32_req(struct io_ring_ctx *ctx,
...@@ -3175,12 +3202,8 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) ...@@ -3175,12 +3202,8 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
struct io_kiocb *req = container_of(node, struct io_kiocb, struct io_kiocb *req = container_of(node, struct io_kiocb,
comp_list); comp_list);
if (!(req->flags & REQ_F_CQE_SKIP)) { if (!(req->flags & REQ_F_CQE_SKIP))
if (!(ctx->flags & IORING_SETUP_CQE32))
__io_fill_cqe_req(ctx, req); __io_fill_cqe_req(ctx, req);
else
__io_fill_cqe32_req(ctx, req);
}
} }
io_commit_cqring(ctx); io_commit_cqring(ctx);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment