Commit f5fa38c5 authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

io_wq: add per-wq work handler instead of per work

io_uring is the only user of io-wq, and now it uses only io-wq callback
for all its requests, namely io_wq_submit_work(). Instead of storing
work->func callback in each instance of io_wq_work, keep it in io-wq
itself.

pros:
- reduces io_wq_work size
- more robust -- ->func won't be invalidated with mem{cpy,set}(req)
- helps other work
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d4c81f38
...@@ -112,6 +112,7 @@ struct io_wq { ...@@ -112,6 +112,7 @@ struct io_wq {
unsigned long state; unsigned long state;
free_work_fn *free_work; free_work_fn *free_work;
io_wq_work_fn *do_work;
struct task_struct *manager; struct task_struct *manager;
struct user_struct *user; struct user_struct *user;
...@@ -528,7 +529,7 @@ static void io_worker_handle_work(struct io_worker *worker) ...@@ -528,7 +529,7 @@ static void io_worker_handle_work(struct io_worker *worker)
hash = io_get_work_hash(work); hash = io_get_work_hash(work);
linked = old_work = work; linked = old_work = work;
linked->func(&linked); wq->do_work(&linked);
linked = (old_work == linked) ? NULL : linked; linked = (old_work == linked) ? NULL : linked;
work = next_hashed; work = next_hashed;
...@@ -785,7 +786,7 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) ...@@ -785,7 +786,7 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
struct io_wq_work *old_work = work; struct io_wq_work *old_work = work;
work->flags |= IO_WQ_WORK_CANCEL; work->flags |= IO_WQ_WORK_CANCEL;
work->func(&work); wq->do_work(&work);
work = (work == old_work) ? NULL : work; work = (work == old_work) ? NULL : work;
wq->free_work(old_work); wq->free_work(old_work);
} while (work); } while (work);
...@@ -1023,7 +1024,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) ...@@ -1023,7 +1024,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
int ret = -ENOMEM, node; int ret = -ENOMEM, node;
struct io_wq *wq; struct io_wq *wq;
if (WARN_ON_ONCE(!data->free_work)) if (WARN_ON_ONCE(!data->free_work || !data->do_work))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
wq = kzalloc(sizeof(*wq), GFP_KERNEL); wq = kzalloc(sizeof(*wq), GFP_KERNEL);
...@@ -1037,6 +1038,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) ...@@ -1037,6 +1038,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
} }
wq->free_work = data->free_work; wq->free_work = data->free_work;
wq->do_work = data->do_work;
/* caller must already hold a reference to this */ /* caller must already hold a reference to this */
wq->user = data->user; wq->user = data->user;
...@@ -1093,7 +1095,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data) ...@@ -1093,7 +1095,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data) bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{ {
if (data->free_work != wq->free_work) if (data->free_work != wq->free_work || data->do_work != wq->do_work)
return false; return false;
return refcount_inc_not_zero(&wq->use_refs); return refcount_inc_not_zero(&wq->use_refs);
......
...@@ -85,7 +85,6 @@ static inline void wq_list_del(struct io_wq_work_list *list, ...@@ -85,7 +85,6 @@ static inline void wq_list_del(struct io_wq_work_list *list,
struct io_wq_work { struct io_wq_work {
struct io_wq_work_node list; struct io_wq_work_node list;
void (*func)(struct io_wq_work **);
struct files_struct *files; struct files_struct *files;
struct mm_struct *mm; struct mm_struct *mm;
const struct cred *creds; const struct cred *creds;
...@@ -94,9 +93,9 @@ struct io_wq_work { ...@@ -94,9 +93,9 @@ struct io_wq_work {
pid_t task_pid; pid_t task_pid;
}; };
#define INIT_IO_WORK(work, _func) \ #define INIT_IO_WORK(work) \
do { \ do { \
*(work) = (struct io_wq_work){ .func = _func }; \ *(work) = (struct io_wq_work){}; \
} while (0) \ } while (0) \
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
...@@ -108,10 +107,12 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) ...@@ -108,10 +107,12 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
} }
typedef void (free_work_fn)(struct io_wq_work *); typedef void (free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work **);
struct io_wq_data { struct io_wq_data {
struct user_struct *user; struct user_struct *user;
io_wq_work_fn *do_work;
free_work_fn *free_work; free_work_fn *free_work;
}; };
......
...@@ -5776,7 +5776,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -5776,7 +5776,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
refcount_set(&req->refs, 2); refcount_set(&req->refs, 2);
req->task = NULL; req->task = NULL;
req->result = 0; req->result = 0;
INIT_IO_WORK(&req->work, io_wq_submit_work); INIT_IO_WORK(&req->work);
if (unlikely(req->opcode >= IORING_OP_LAST)) if (unlikely(req->opcode >= IORING_OP_LAST))
return -EINVAL; return -EINVAL;
...@@ -6796,6 +6796,7 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx, ...@@ -6796,6 +6796,7 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx,
data.user = ctx->user; data.user = ctx->user;
data.free_work = io_free_work; data.free_work = io_free_work;
data.do_work = io_wq_submit_work;
if (!(p->flags & IORING_SETUP_ATTACH_WQ)) { if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
/* Do QD, or 4 * CPUS, whatever is smallest */ /* Do QD, or 4 * CPUS, whatever is smallest */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment