Commit 3fe07bcd authored by Jens Axboe

io_uring: cleanup handling of the two task_work lists

Rather than pass in a bool for whether or not this work item needs to go
into the priority list, provide separate helpers for it. For most use
cases, this also gets rid of the branch for non-priority task work.

While at it, rename the prior_task_list to prio_task_list. Prior is
a confusing name for it, as it would seem to indicate that this is the
previous task_work list. prio makes it clear that this is a priority
task_work list.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 58e5bdeb
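For illustration, here is a minimal, self-contained sketch of the pattern this patch applies: a common __helper that takes the target list, plus thin wrappers that pick the list, replacing the old bool parameter. This is a hypothetical userspace analogue only; the struct, field, and function names below are stand-ins, not the kernel's (the real helpers are __io_req_task_work_add(), io_req_task_work_add() and io_req_task_prio_work_add(), shown in the diff).

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for io_wq_work_list and io_uring_task. */
struct work_list { int nr; };

struct task_ctx {
	struct work_list task_list;
	struct work_list prio_task_list;
	bool sqpoll;			/* stands in for IORING_SETUP_SQPOLL */
};

/* Common helper: the caller has already picked the target list. */
static void __work_add(struct task_ctx *tctx, struct work_list *list)
{
	list->nr++;			/* real code would queue the work item here */
}

/* Normal task_work: no priority branch left in the common path. */
static void work_add(struct task_ctx *tctx)
{
	__work_add(tctx, &tctx->task_list);
}

/* Priority task_work: the SQPOLL check moves into this wrapper. */
static void prio_work_add(struct task_ctx *tctx)
{
	if (tctx->sqpoll)
		__work_add(tctx, &tctx->prio_task_list);
	else
		__work_add(tctx, &tctx->task_list);
}

int main(void)
{
	struct task_ctx tctx = { .sqpoll = true };

	work_add(&tctx);		/* old scheme: work_add(&tctx, false) */
	prio_work_add(&tctx);		/* old scheme: work_add(&tctx, tctx.sqpoll) */
	printf("task=%d prio=%d\n", tctx.task_list.nr, tctx.prio_task_list.nr);
	return 0;
}

Moving the SQPOLL check into the priority wrapper is what lets the plain io_req_task_work_add() callers drop the branch entirely, as the commit message notes.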
@@ -523,7 +523,7 @@ struct io_uring_task {
 	spinlock_t		task_lock;
 	struct io_wq_work_list	task_list;
-	struct io_wq_work_list	prior_task_list;
+	struct io_wq_work_list	prio_task_list;
 	struct callback_head	task_work;
 	struct file		**registered_rings;
 	bool			task_running;
@@ -2893,10 +2893,10 @@ static void tctx_task_work(struct callback_head *cb)
 		struct io_wq_work_node *node1, *node2;
 
 		spin_lock_irq(&tctx->task_lock);
-		node1 = tctx->prior_task_list.first;
+		node1 = tctx->prio_task_list.first;
 		node2 = tctx->task_list.first;
 		INIT_WQ_LIST(&tctx->task_list);
-		INIT_WQ_LIST(&tctx->prior_task_list);
+		INIT_WQ_LIST(&tctx->prio_task_list);
 		if (!node2 && !node1)
 			tctx->task_running = false;
 		spin_unlock_irq(&tctx->task_lock);
@@ -2910,7 +2910,7 @@ static void tctx_task_work(struct callback_head *cb)
 		cond_resched();
 
 		if (data_race(!tctx->task_list.first) &&
-		    data_race(!tctx->prior_task_list.first) && uring_locked)
+		    data_race(!tctx->prio_task_list.first) && uring_locked)
 			io_submit_flush_completions(ctx);
 	}
@@ -2921,24 +2921,19 @@ static void tctx_task_work(struct callback_head *cb)
 		io_uring_drop_tctx_refs(current);
 }
 
-static void io_req_task_work_add(struct io_kiocb *req, bool priority)
+static void __io_req_task_work_add(struct io_kiocb *req,
+				   struct io_uring_task *tctx,
+				   struct io_wq_work_list *list)
 {
-	struct task_struct *tsk = req->task;
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_uring_task *tctx = tsk->io_uring;
 	struct io_wq_work_node *node;
 	unsigned long flags;
 	bool running;
 
-	WARN_ON_ONCE(!tctx);
-
 	io_drop_inflight_file(req);
 
 	spin_lock_irqsave(&tctx->task_lock, flags);
-	if (priority)
-		wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
-	else
-		wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
+	wq_list_add_tail(&req->io_task_work.node, list);
 	running = tctx->task_running;
 	if (!running)
 		tctx->task_running = true;
@@ -2951,12 +2946,12 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
 
-	if (likely(!task_work_add(tsk, &tctx->task_work, ctx->notify_method)))
+	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
 		return;
 
 	spin_lock_irqsave(&tctx->task_lock, flags);
 	tctx->task_running = false;
-	node = wq_list_merge(&tctx->prior_task_list, &tctx->task_list);
+	node = wq_list_merge(&tctx->prio_task_list, &tctx->task_list);
 	spin_unlock_irqrestore(&tctx->task_lock, flags);
 
 	while (node) {
@@ -2968,6 +2963,23 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
 	}
 }
 
+static void io_req_task_work_add(struct io_kiocb *req)
+{
+	struct io_uring_task *tctx = req->task->io_uring;
+
+	__io_req_task_work_add(req, tctx, &tctx->task_list);
+}
+
+static void io_req_task_prio_work_add(struct io_kiocb *req)
+{
+	struct io_uring_task *tctx = req->task->io_uring;
+
+	if (req->ctx->flags & IORING_SETUP_SQPOLL)
+		__io_req_task_work_add(req, tctx, &tctx->prio_task_list);
+	else
+		__io_req_task_work_add(req, tctx, &tctx->task_list);
+}
+
 static void io_req_tw_post(struct io_kiocb *req, bool *locked)
 {
 	io_req_complete_post(req, req->cqe.res, req->cqe.flags);
@@ -2978,7 +2990,7 @@ static void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags)
 	req->cqe.res = res;
 	req->cqe.flags = cflags;
 	req->io_task_work.func = io_req_tw_post;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 }
 
 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
@@ -3002,19 +3014,19 @@ static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
 {
 	req->cqe.res = ret;
 	req->io_task_work.func = io_req_task_cancel;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 }
 
 static void io_req_task_queue(struct io_kiocb *req)
 {
 	req->io_task_work.func = io_req_task_submit;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 }
 
 static void io_req_task_queue_reissue(struct io_kiocb *req)
 {
 	req->io_task_work.func = io_queue_iowq;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 }
 
 static void io_queue_next(struct io_kiocb *req)
@@ -3422,7 +3434,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
 		return;
 	req->cqe.res = res;
 	req->io_task_work.func = io_req_task_complete;
-	io_req_task_work_add(req, !!(req->ctx->flags & IORING_SETUP_SQPOLL));
+	io_req_task_prio_work_add(req);
 }
 
 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
@@ -4924,7 +4936,7 @@ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
 	req->uring_cmd.task_work_cb = task_work_cb;
 	req->io_task_work.func = io_uring_cmd_work;
-	io_req_task_work_add(req, !!(req->ctx->flags & IORING_SETUP_SQPOLL));
+	io_req_task_prio_work_add(req);
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
@@ -6764,7 +6776,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
 	req->io_task_work.func = io_apoll_task_func;
 
 	trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 }
 
 static inline void io_poll_execute(struct io_kiocb *req, int res, int events)
@@ -7265,7 +7277,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 	req->cqe.res = -ETIME;
 	req->io_task_work.func = io_req_task_complete;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 	return HRTIMER_NORESTART;
 }
@@ -8385,7 +8397,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 	spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
 	req->io_task_work.func = io_req_task_link_timeout;
-	io_req_task_work_add(req, false);
+	io_req_task_work_add(req);
 	return HRTIMER_NORESTART;
 }
@@ -10066,7 +10078,7 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
 	task->io_uring = tctx;
 	spin_lock_init(&tctx->task_lock);
 	INIT_WQ_LIST(&tctx->task_list);
-	INIT_WQ_LIST(&tctx->prior_task_list);
+	INIT_WQ_LIST(&tctx->prio_task_list);
 	init_task_work(&tctx->task_work, tctx_task_work);
 	return 0;
 }