Commit 502c87d6 authored by Stefan Roesch, committed by Jens Axboe

io-uring: Make tracepoints consistent.

This makes the io-uring tracepoints consistent. Where it makes sense
the tracepoints start with the following four fields:
- context (ring)
- request
- user_data
- opcode.
Signed-off-by: Stefan Roesch <shr@fb.com>
Link: https://lore.kernel.org/r/20220214180430.70572-3-shr@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d5ec1dfa
...@@ -1603,8 +1603,8 @@ static void io_queue_async_work(struct io_kiocb *req, bool *dont_use) ...@@ -1603,8 +1603,8 @@ static void io_queue_async_work(struct io_kiocb *req, bool *dont_use)
if (WARN_ON_ONCE(!same_thread_group(req->task, current))) if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
req->work.flags |= IO_WQ_WORK_CANCEL; req->work.flags |= IO_WQ_WORK_CANCEL;
trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, trace_io_uring_queue_async_work(ctx, req, req->user_data, req->opcode, req->flags,
&req->work, req->flags); &req->work, io_wq_is_hashed(&req->work));
io_wq_enqueue(tctx->io_wq, &req->work); io_wq_enqueue(tctx->io_wq, &req->work);
if (link) if (link)
io_queue_linked_timeout(link); io_queue_linked_timeout(link);
...@@ -1936,7 +1936,7 @@ static inline bool __fill_cqe(struct io_ring_ctx *ctx, u64 user_data, ...@@ -1936,7 +1936,7 @@ static inline bool __fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
static inline bool __io_fill_cqe(struct io_kiocb *req, s32 res, u32 cflags) static inline bool __io_fill_cqe(struct io_kiocb *req, s32 res, u32 cflags)
{ {
trace_io_uring_complete(req->ctx, req->user_data, res, cflags); trace_io_uring_complete(req->ctx, req, req->user_data, res, cflags);
return __fill_cqe(req->ctx, req->user_data, res, cflags); return __fill_cqe(req->ctx, req->user_data, res, cflags);
} }
...@@ -1950,7 +1950,7 @@ static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, ...@@ -1950,7 +1950,7 @@ static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
s32 res, u32 cflags) s32 res, u32 cflags)
{ {
ctx->cq_extra++; ctx->cq_extra++;
trace_io_uring_complete(ctx, user_data, res, cflags); trace_io_uring_complete(ctx, NULL, user_data, res, cflags);
return __fill_cqe(ctx, user_data, res, cflags); return __fill_cqe(ctx, user_data, res, cflags);
} }
...@@ -2202,7 +2202,9 @@ static void io_fail_links(struct io_kiocb *req) ...@@ -2202,7 +2202,9 @@ static void io_fail_links(struct io_kiocb *req)
nxt = link->link; nxt = link->link;
link->link = NULL; link->link = NULL;
trace_io_uring_fail_link(req, link); trace_io_uring_fail_link(req->ctx, req, req->user_data,
req->opcode, link);
if (!ignore_cqes) { if (!ignore_cqes) {
link->flags &= ~REQ_F_CQE_SKIP; link->flags &= ~REQ_F_CQE_SKIP;
io_fill_cqe_req(link, res, 0); io_fill_cqe_req(link, res, 0);
...@@ -5629,7 +5631,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask) ...@@ -5629,7 +5631,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask)
else else
req->io_task_work.func = io_apoll_task_func; req->io_task_work.func = io_apoll_task_func;
trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); trace_io_uring_task_add(req->ctx, req, req->user_data, req->opcode, mask);
io_req_task_work_add(req, false); io_req_task_work_add(req, false);
} }
...@@ -5858,7 +5860,7 @@ static int io_arm_poll_handler(struct io_kiocb *req) ...@@ -5858,7 +5860,7 @@ static int io_arm_poll_handler(struct io_kiocb *req)
if (ret || ipt.error) if (ret || ipt.error)
return ret ? IO_APOLL_READY : IO_APOLL_ABORTED; return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data, trace_io_uring_poll_arm(ctx, req, req->user_data, req->opcode,
mask, apoll->poll.events); mask, apoll->poll.events);
return IO_APOLL_OK; return IO_APOLL_OK;
} }
...@@ -6667,7 +6669,7 @@ static __cold void io_drain_req(struct io_kiocb *req) ...@@ -6667,7 +6669,7 @@ static __cold void io_drain_req(struct io_kiocb *req)
goto queue; goto queue;
} }
trace_io_uring_defer(ctx, req, req->user_data); trace_io_uring_defer(ctx, req, req->user_data, req->opcode);
de->req = req; de->req = req;
de->seq = seq; de->seq = seq;
list_add_tail(&de->list, &ctx->defer_list); list_add_tail(&de->list, &ctx->defer_list);
...@@ -7001,7 +7003,7 @@ static struct file *io_file_get_normal(struct io_ring_ctx *ctx, ...@@ -7001,7 +7003,7 @@ static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
{ {
struct file *file = fget(fd); struct file *file = fget(fd);
trace_io_uring_file_get(ctx, fd); trace_io_uring_file_get(ctx, req, req->user_data, fd);
/* we don't allow fixed io_uring files */ /* we don't allow fixed io_uring files */
if (file && unlikely(file->f_op == &io_uring_fops)) if (file && unlikely(file->f_op == &io_uring_fops))
...@@ -7299,7 +7301,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -7299,7 +7301,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
ret = io_init_req(ctx, req, sqe); ret = io_init_req(ctx, req, sqe);
if (unlikely(ret)) { if (unlikely(ret)) {
trace_io_uring_req_failed(sqe, ret); trace_io_uring_req_failed(sqe, ctx, req, ret);
/* fail even hard links since we don't submit */ /* fail even hard links since we don't submit */
if (link->head) { if (link->head) {
...@@ -7326,7 +7328,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -7326,7 +7328,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
} }
/* don't need @sqe from now on */ /* don't need @sqe from now on */
trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data, trace_io_uring_submit_sqe(ctx, req, req->user_data, req->opcode,
req->flags, true, req->flags, true,
ctx->flags & IORING_SETUP_SQPOLL); ctx->flags & IORING_SETUP_SQPOLL);
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment