Commit 91c7884a authored by Pavel Begunkov, committed by Jens Axboe

io_uring: remove IOU_F_TWQ_FORCE_NORMAL

Extract a function for non-local task_work_add, and use it directly from
io_move_task_work_from_local(). Now we don't use IOU_F_TWQ_FORCE_NORMAL
and it can be killed.

As a small positive side effect, we no longer grab task->io_uring on the
io_req_local_work_add() path, where it isn't needed; only
io_req_normal_work_add() loads it now.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/2e55571e8ff2927ae3cc12da606d204e2485525b.1687518903.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2fdd6fb5
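In short, the change turns the single entry point into a dispatcher over an
extracted helper. The sketch below condenses the diff that follows (wakeup,
fallback, and RCU details are elided; the _old/_new suffixes are editorial
labels, not kernel names):

/* Before: IOU_F_TWQ_FORCE_NORMAL opted out of the DEFER_TASKRUN (local)
 * path, and task->io_uring was loaded even when going local.
 */
void __io_req_task_work_add_old(struct io_kiocb *req, unsigned flags)
{
	struct io_uring_task *tctx = req->task->io_uring;

	if (!(flags & IOU_F_TWQ_FORCE_NORMAL) &&
	    (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)) {
		io_req_local_work_add(req, flags);
		return;
	}
	/* ... queue req onto tctx->task_list ... */
}

/* After: the normal path lives in io_req_normal_work_add(), the flag is
 * gone, and only the normal path touches task->io_uring.
 */
void __io_req_task_work_add_new(struct io_kiocb *req, unsigned flags)
{
	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		io_req_local_work_add(req, flags);
	else
		io_req_normal_work_add(req);
}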
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1317,7 +1317,7 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx)
 	}
 }
 
-static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
+static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned nr_wait, nr_tw, nr_tw_prev;
@@ -1368,19 +1368,11 @@ static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
 }
 
-void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
+static void io_req_normal_work_add(struct io_kiocb *req)
 {
 	struct io_uring_task *tctx = req->task->io_uring;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (!(flags & IOU_F_TWQ_FORCE_NORMAL) &&
-	    (ctx->flags & IORING_SETUP_DEFER_TASKRUN)) {
-		rcu_read_lock();
-		io_req_local_work_add(req, flags);
-		rcu_read_unlock();
-		return;
-	}
-
 	/* task_work already pending, we're done */
 	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
 		return;
@@ -1394,6 +1386,17 @@ void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
 	io_fallback_tw(tctx);
 }
 
+void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
+{
+	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+		rcu_read_lock();
+		io_req_local_work_add(req, flags);
+		rcu_read_unlock();
+	} else {
+		io_req_normal_work_add(req);
+	}
+}
+
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 {
 	struct llist_node *node;
@@ -1404,7 +1407,7 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    io_task_work.node);
 		node = node->next;
-		__io_req_task_work_add(req, IOU_F_TWQ_FORCE_NORMAL);
+		io_req_normal_work_add(req);
 	}
 }
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -16,9 +16,6 @@
 #endif
 
 enum {
-	/* don't use deferred task_work */
-	IOU_F_TWQ_FORCE_NORMAL	= 1,
-
 	/*
 	 * A hint to not wake right away but delay until there are enough of
 	 * tw's queued to match the number of CQEs the task is waiting for.
@@ -26,7 +23,7 @@ enum {
 	 * Must not be used with requests generating more than one CQE.
 	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
 	 */
-	IOU_F_TWQ_LAZY_WAKE	= 2,
+	IOU_F_TWQ_LAZY_WAKE	= 1,
 };
 
 enum {