Commit 8f6ed49a authored by Pavel Begunkov, committed by Jens Axboe

io_uring: kill cached_cq_overflow

There are two copies of cq_overflow: the one shared with userspace and an
internal cached one. The cached copy was needed for DRAIN accounting, but
now we have yet another knob to tune that accounting, cq_extra, so we can
throw away the internal counter and just increment the one in the shared ring.
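
Why this is safe is plain u32 arithmetic. A minimal stand-alone model (not
kernel code; the two helpers below are made up for illustration) showing that
decrementing cq_extra on overflow leaves the old req_need_defer() check
unchanged:

/* Stand-alone sketch, not kernel code: u32 wraparound makes the two
 * forms of the drain check identical. Compile with any C compiler. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Old scheme: overflowed CQEs tracked in a separate cached_cq_overflow. */
static bool old_need_defer(u32 seq, u32 cq_extra, u32 tail, u32 overflow)
{
	return seq + cq_extra != tail + overflow;
}

/* New scheme: each overflow decrements cq_extra instead. */
static bool new_need_defer(u32 seq, u32 cq_extra, u32 tail, u32 overflow)
{
	return seq + (cq_extra - overflow) != tail;
}

int main(void)
{
	const u32 v[] = { 0, 1, 2, 100, UINT32_MAX - 1, UINT32_MAX };
	const int n = sizeof(v) / sizeof(v[0]);

	/* Compare both forms over sample values, including wraparound. */
	for (int a = 0; a < n; a++)
		for (int b = 0; b < n; b++)
			for (int c = 0; c < n; c++)
				for (int d = 0; d < n; d++)
					assert(old_need_defer(v[a], v[b], v[c], v[d]) ==
					       new_need_defer(v[a], v[b], v[c], v[d]));
	puts("old and new drain checks agree");
	return 0;
}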

If the user modifies the shared counter and so never gets the right
overflow value again, that's the user's own problem, even though before
this change the kernel would have restored it from the cached copy on
the next overflow.
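
For reference, the counter in question is the one userspace sees through the
mmap'ed CQ ring; liburing exposes it as the raw pointer koverflow. A sketch of
reading it (assumes liburing is installed; the program is illustrative only,
link with -luring):

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* ring.cq.koverflow points into the shared CQ ring; after this
	 * patch the kernel increments it in place, so a stray write from
	 * userspace would skew the reported count for good. */
	printf("cq_overflow = %u\n", *ring.cq.koverflow);

	io_uring_queue_exit(&ring);
	return 0;
}
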
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8427965f5175dd051febc63804909861109ce859.1621201931.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ea5ab3b5
@@ -364,7 +364,6 @@ struct io_ring_ctx {
 	unsigned		sq_entries;
 	unsigned		sq_thread_idle;
 	unsigned		cached_sq_dropped;
-	unsigned		cached_cq_overflow;
 	unsigned long		sq_check_overflow;
 
 	struct list_head	defer_list;
@@ -1199,13 +1198,20 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	return NULL;
 }
 
+static void io_account_cq_overflow(struct io_ring_ctx *ctx)
+{
+	struct io_rings *r = ctx->rings;
+
+	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
+	ctx->cq_extra--;
+}
+
 static bool req_need_defer(struct io_kiocb *req, u32 seq)
 {
 	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
 		struct io_ring_ctx *ctx = req->ctx;
 
-		return seq + ctx->cq_extra != ctx->cached_cq_tail
-				+ READ_ONCE(ctx->cached_cq_overflow);
+		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
 	}
 
 	return false;
@@ -1444,8 +1450,8 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 		if (cqe)
 			memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
 		else
-			WRITE_ONCE(ctx->rings->cq_overflow,
-				   ++ctx->cached_cq_overflow);
+			io_account_cq_overflow(ctx);
+
 		posted = true;
 		list_del(&ocqe->list);
 		kfree(ocqe);
@@ -1529,7 +1535,7 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 		 * or cannot allocate an overflow entry, then we need to drop it
 		 * on the floor.
 		 */
-		WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
+		io_account_cq_overflow(ctx);
 		return false;
 	}
 	if (list_empty(&ctx->cq_overflow_list)) {