Commit 498ccd9e authored by Jens Axboe

io_uring: use cached copies of sq->dropped and cq->overflow

We currently use the ring values directly, but that can lead to issues
if the application is malicious and changes these values behind our
back. Create in-kernel cached versions of them, and just overwrite the
user-visible side when we update them. This is similar to how we treat
the sq/cq ring tail/head updates.
Reported-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 935d1e45
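
The idea, in a nutshell: the kernel keeps a private, trusted copy of each
counter and treats the user-mapped ring field as write-only, so a value the
application scribbles into shared memory can never flow back into kernel
logic. A minimal sketch of that pattern (hypothetical names, not the actual
io_uring code; assumes kernel-style C with WRITE_ONCE from
<linux/compiler.h>):

	/*
	 * Sketch of the cached-counter pattern this commit applies
	 * (hypothetical names such as ring_shared/ring_drop_entry).
	 */
	struct ring_shared {
		unsigned	sq_dropped;	/* mapped into userspace: untrusted */
		unsigned	cq_overflow;	/* likewise user-writable */
	};

	struct ring_kernel {
		struct ring_shared	*shared;
		unsigned		cached_sq_dropped;	/* kernel-private: trusted */
	};

	static void ring_drop_entry(struct ring_kernel *ring)
	{
		/* Update the trusted copy first... */
		ring->cached_sq_dropped++;
		/* ...then publish it; the shared field is never read back. */
		WRITE_ONCE(ring->shared->sq_dropped, ring->cached_sq_dropped);
	}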
@@ -197,6 +197,7 @@ struct io_ring_ctx {
 		unsigned		sq_entries;
 		unsigned		sq_mask;
 		unsigned		sq_thread_idle;
+		unsigned		cached_sq_dropped;
 		struct io_uring_sqe	*sq_sqes;

 		struct list_head	defer_list;
@@ -212,6 +213,7 @@ struct io_ring_ctx {

 	struct {
 		unsigned		cached_cq_tail;
+		atomic_t		cached_cq_overflow;
 		unsigned		cq_entries;
 		unsigned		cq_mask;
 		struct wait_queue_head	cq_wait;
...@@ -420,7 +422,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) ...@@ -420,7 +422,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
static inline bool __io_sequence_defer(struct io_ring_ctx *ctx, static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
struct io_kiocb *req) struct io_kiocb *req)
{ {
return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped; return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+ atomic_read(&ctx->cached_cq_overflow);
} }
static inline bool io_sequence_defer(struct io_ring_ctx *ctx, static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
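
To make the sequence check concrete (illustrative numbers, not taken from
the commit): suppose a drained request recorded sequence 12 at submission.
Once posted completions (say 9) plus dropped SQEs (2) plus overflowed CQEs
(1) sum to 12, the comparison returns false and the request need no longer
be deferred; dropped and overflowed entries count because they consume
sequence numbers without ever producing a normal completion. Since all
three terms are now kernel-private, an application rewriting sq_dropped or
cq_overflow in the shared rings can no longer skew this arithmetic.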
...@@ -567,9 +570,8 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data, ...@@ -567,9 +570,8 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
WRITE_ONCE(cqe->res, res); WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, 0); WRITE_ONCE(cqe->flags, 0);
} else { } else {
unsigned overflow = READ_ONCE(ctx->rings->cq_overflow); WRITE_ONCE(ctx->rings->cq_overflow,
atomic_inc_return(&ctx->cached_cq_overflow));
WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1);
} }
} }
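
The old else-branch was a read-modify-write through memory the application
can store to: the READ_ONCE() could observe whatever value userspace had
just written, and the kernel would then publish that value plus one. A
sketch of the hazard, reusing the hypothetical ring_shared type from above
(illustrative, not the commit's code):

	static void cq_overflow_inc_unsafe(struct ring_shared *shared)
	{
		/* Untrusted load: userspace may have just stored 0 (or anything). */
		unsigned overflow = READ_ONCE(shared->cq_overflow);

		/* Publishes an application-chosen value + 1; the true count is lost. */
		WRITE_ONCE(shared->cq_overflow, overflow + 1);
	}

With the patch, atomic_inc_return() bumps the kernel-private atomic_t and
the shared field becomes a pure mirror of it.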
@@ -2564,7 +2566,8 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)

 	/* drop invalid entries */
 	ctx->cached_sq_head++;
-	rings->sq_dropped++;
+	ctx->cached_sq_dropped++;
+	WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
 	return false;
 }
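
On the userspace side nothing changes: the application still reads
sq_dropped from the shared ring to learn how many invalid SQEs were
rejected. Roughly (a sketch against the raw mmap'ed ring, not liburing's
API; 'sq_dropped' is assumed to point at the mapped field):

	#include <stdio.h>

	/*
	 * The published counter is output-only, so an application that
	 * corrupts it only misleads itself, never the kernel. An aligned
	 * 32-bit load pairs with the kernel's single WRITE_ONCE store.
	 */
	static void report_dropped(const volatile unsigned *sq_dropped)
	{
		unsigned dropped = *sq_dropped;

		if (dropped)
			fprintf(stderr, "kernel dropped %u invalid SQEs\n", dropped);
	}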