Commit aff5b2df authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

io_uring: better caching for ctx timeout fields

Following the timeout fields' access patterns, move all of them into a
separate cache line inside ctx, so they don't interfere with normal
completion caching, especially since timeout removals and completions
are separated and the latter is done via tw.

It also sheds some bytes from io_ring_ctx, 1216B -> 1152B
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4b163793072840de53b3cb66e0c2995e7226ff78.1655310733.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b2543603
...@@ -181,8 +181,6 @@ struct io_ring_ctx { ...@@ -181,8 +181,6 @@ struct io_ring_ctx {
struct xarray io_bl_xa; struct xarray io_bl_xa;
struct list_head io_buffers_cache; struct list_head io_buffers_cache;
struct list_head timeout_list;
struct list_head ltimeout_list;
struct list_head cq_overflow_list; struct list_head cq_overflow_list;
struct list_head apoll_cache; struct list_head apoll_cache;
struct xarray personalities; struct xarray personalities;
...@@ -215,15 +213,11 @@ struct io_ring_ctx { ...@@ -215,15 +213,11 @@ struct io_ring_ctx {
struct io_ev_fd __rcu *io_ev_fd; struct io_ev_fd __rcu *io_ev_fd;
struct wait_queue_head cq_wait; struct wait_queue_head cq_wait;
unsigned cq_extra; unsigned cq_extra;
atomic_t cq_timeouts;
unsigned cq_last_tm_flush;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct { struct {
spinlock_t completion_lock; spinlock_t completion_lock;
spinlock_t timeout_lock;
/* /*
* ->iopoll_list is protected by the ctx->uring_lock for * ->iopoll_list is protected by the ctx->uring_lock for
* io_uring instances that don't use IORING_SETUP_SQPOLL. * io_uring instances that don't use IORING_SETUP_SQPOLL.
...@@ -255,6 +249,15 @@ struct io_ring_ctx { ...@@ -255,6 +249,15 @@ struct io_ring_ctx {
struct list_head io_buffers_pages; struct list_head io_buffers_pages;
}; };
/* timeouts */
struct {
spinlock_t timeout_lock;
atomic_t cq_timeouts;
struct list_head timeout_list;
struct list_head ltimeout_list;
unsigned cq_last_tm_flush;
} ____cacheline_aligned_in_smp;
/* Keep this last, we don't need it for the fast path */ /* Keep this last, we don't need it for the fast path */
struct { struct {
#if defined(CONFIG_UNIX) #if defined(CONFIG_UNIX)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment