Commit 22eb2a3f authored by Pavel Begunkov, committed by Jens Axboe

io_uring: refactor ctx slow data placement

Shove all slow path data at the end of ctx and get rid of extra
indentation.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/bcaf200298dd469af20787650550efc66d89bef2.1655310733.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent aff5b2df
...@@ -185,7 +185,6 @@ struct io_ring_ctx { ...@@ -185,7 +185,6 @@ struct io_ring_ctx {
struct list_head apoll_cache; struct list_head apoll_cache;
struct xarray personalities; struct xarray personalities;
u32 pers_next; u32 pers_next;
unsigned sq_thread_idle;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
/* IRQ completion list, under ->completion_lock */ /* IRQ completion list, under ->completion_lock */
...@@ -232,23 +231,6 @@ struct io_ring_ctx { ...@@ -232,23 +231,6 @@ struct io_ring_ctx {
struct list_head io_buffers_comp; struct list_head io_buffers_comp;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct io_restriction restrictions;
/* slow path rsrc auxilary data, used by update/register */
struct {
struct io_rsrc_node *rsrc_backup_node;
struct io_mapped_ubuf *dummy_ubuf;
struct io_rsrc_data *file_data;
struct io_rsrc_data *buf_data;
struct delayed_work rsrc_put_work;
struct llist_head rsrc_put_llist;
struct list_head rsrc_ref_list;
spinlock_t rsrc_ref_lock;
struct list_head io_buffers_pages;
};
/* timeouts */ /* timeouts */
struct { struct {
spinlock_t timeout_lock; spinlock_t timeout_lock;
...@@ -259,30 +241,45 @@ struct io_ring_ctx { ...@@ -259,30 +241,45 @@ struct io_ring_ctx {
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
/* Keep this last, we don't need it for the fast path */ /* Keep this last, we don't need it for the fast path */
struct {
#if defined(CONFIG_UNIX) struct io_restriction restrictions;
struct socket *ring_sock;
#endif /* slow path rsrc auxilary data, used by update/register */
/* hashed buffered write serialization */ struct io_rsrc_node *rsrc_backup_node;
struct io_wq_hash *hash_map; struct io_mapped_ubuf *dummy_ubuf;
struct io_rsrc_data *file_data;
/* Only used for accounting purposes */ struct io_rsrc_data *buf_data;
struct user_struct *user;
struct mm_struct *mm_account; struct delayed_work rsrc_put_work;
struct llist_head rsrc_put_llist;
/* ctx exit and cancelation */ struct list_head rsrc_ref_list;
struct llist_head fallback_llist; spinlock_t rsrc_ref_lock;
struct delayed_work fallback_work;
struct work_struct exit_work; struct list_head io_buffers_pages;
struct list_head tctx_list;
struct completion ref_comp; #if defined(CONFIG_UNIX)
struct socket *ring_sock;
/* io-wq management, e.g. thread count */ #endif
u32 iowq_limits[2]; /* hashed buffered write serialization */
bool iowq_limits_set; struct io_wq_hash *hash_map;
struct list_head defer_list; /* Only used for accounting purposes */
}; struct user_struct *user;
struct mm_struct *mm_account;
/* ctx exit and cancelation */
struct llist_head fallback_llist;
struct delayed_work fallback_work;
struct work_struct exit_work;
struct list_head tctx_list;
struct completion ref_comp;
/* io-wq management, e.g. thread count */
u32 iowq_limits[2];
bool iowq_limits_set;
struct list_head defer_list;
unsigned sq_thread_idle;
}; };
enum { enum {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment