Commit c6dd763c authored by Dylan Yudaken, committed by Jens Axboe

io_uring: trace task_work_run

trace task_work_run to help provide stats on how often task work is run
and what batch sizes are coming through.
Signed-off-by: Dylan Yudaken <dylany@fb.com>
Link: https://lore.kernel.org/r/20220622134028.2013417-9-dylany@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent eccd8801
@@ -986,10 +986,12 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
 	percpu_ref_put(&ctx->refs);
 }
 
-static void handle_tw_list(struct llist_node *node,
-			   struct io_ring_ctx **ctx, bool *locked,
-			   struct llist_node *last)
+static unsigned int handle_tw_list(struct llist_node *node,
+				   struct io_ring_ctx **ctx, bool *locked,
+				   struct llist_node *last)
 {
+	unsigned int count = 0;
+
 	while (node != last) {
 		struct llist_node *next = node->next;
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
@@ -1006,7 +1008,10 @@ static void handle_tw_list(struct llist_node *node,
 		}
 		req->io_task_work.func(req, locked);
 		node = next;
+		count++;
 	}
+
+	return count;
 }
 
 /**
@@ -1047,12 +1052,14 @@ void tctx_task_work(struct callback_head *cb)
 						  task_work);
 	struct llist_node fake = {};
 	struct llist_node *node = io_llist_xchg(&tctx->task_list, &fake);
+	unsigned int loops = 1;
+	unsigned int count = handle_tw_list(node, &ctx, &uring_locked, NULL);
 
-	handle_tw_list(node, &ctx, &uring_locked, NULL);
 	node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
 	while (node != &fake) {
+		loops++;
 		node = io_llist_xchg(&tctx->task_list, &fake);
-		handle_tw_list(node, &ctx, &uring_locked, &fake);
+		count += handle_tw_list(node, &ctx, &uring_locked, &fake);
 		node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
 	}
 
@@ -1061,6 +1068,8 @@ void tctx_task_work(struct callback_head *cb)
 	/* relaxed read is enough as only the task itself sets ->in_idle */
 	if (unlikely(atomic_read(&tctx->in_idle)))
 		io_uring_drop_tctx_refs(current);
+
+	trace_io_uring_task_work_run(tctx, count, loops);
 }
 
 void io_req_task_work_add(struct io_kiocb *req)
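Note: the tracepoint used above, trace_io_uring_task_work_run(tctx, count, loops), is only called here; its definition is presumably added earlier in the same series (in include/trace/events/io_uring.h). Purely as an illustrative sketch of what such an event definition looks like, based only on the arguments visible at the call site, one could write something along these lines; the field names, the void * typing of tctx, and the printk format are assumptions, not the exact upstream definition:

	/*
	 * Sketch of a TRACE_EVENT matching the call site above.
	 * Field names and format string are illustrative assumptions;
	 * the real definition lives in include/trace/events/io_uring.h.
	 */
	TRACE_EVENT(io_uring_task_work_run,

		TP_PROTO(void *tctx, unsigned int count, unsigned int loops),

		TP_ARGS(tctx, count, loops),

		TP_STRUCT__entry(
			__field(void *,		tctx)
			__field(unsigned int,	count)
			__field(unsigned int,	loops)
		),

		TP_fast_assign(
			__entry->tctx	= tctx;
			__entry->count	= count;
			__entry->loops	= loops;
		),

		TP_printk("tctx %p count %u loops %u",
			  __entry->tctx, __entry->count, __entry->loops)
	);

With the event enabled (for example via the io_uring event group in tracefs), each invocation of tctx_task_work() reports how many work items it processed (count) and how many times it re-fetched the task_list (loops), which is exactly the run-frequency and batch-size information the commit message describes.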