Commit daf763c2 authored by Takashi Sakamoto's avatar Takashi Sakamoto

firewire: core: add tracepoints events for completions of packets in isochronous context

It is helpful to trace the completion of packets in an isochronous context
when the core function is requested to process them, by both in-kernel unit
drivers and userspace applications.

This commit adds some tracepoints events for the aim.

Link: https://lore.kernel.org/r/20240623220859.851685-8-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
parent 1f3c0d79
...@@ -20,4 +20,8 @@ void copy_port_status(u8 *port_status, unsigned int port_capacity, ...@@ -20,4 +20,8 @@ void copy_port_status(u8 *port_status, unsigned int port_capacity,
self_id_sequence_get_port_status(self_id_sequence, quadlet_count, port_index); self_id_sequence_get_port_status(self_id_sequence, quadlet_count, port_index);
} }
} }
EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_inbound_single_completions);
EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_inbound_multiple_completions);
EXPORT_TRACEPOINT_SYMBOL_GPL(isoc_outbound_completions);
#endif #endif
...@@ -2833,8 +2833,13 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) ...@@ -2833,8 +2833,13 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
} }
} }
static void flush_iso_completions(struct iso_context *ctx) static void flush_iso_completions(struct iso_context *ctx, enum fw_iso_context_completions_cause cause)
{ {
trace_isoc_inbound_single_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
ctx->header_length);
trace_isoc_outbound_completions(&ctx->base, ctx->last_timestamp, cause, ctx->header,
ctx->header_length);
ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
ctx->header_length, ctx->header, ctx->header_length, ctx->header,
ctx->base.callback_data); ctx->base.callback_data);
...@@ -2848,7 +2853,7 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr) ...@@ -2848,7 +2853,7 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
if (ctx->base.drop_overflow_headers) if (ctx->base.drop_overflow_headers)
return; return;
flush_iso_completions(ctx); flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
} }
ctx_hdr = ctx->header + ctx->header_length; ctx_hdr = ctx->header + ctx->header_length;
...@@ -2897,7 +2902,7 @@ static int handle_ir_packet_per_buffer(struct context *context, ...@@ -2897,7 +2902,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
copy_iso_headers(ctx, (u32 *) (last + 1)); copy_iso_headers(ctx, (u32 *) (last + 1));
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
flush_iso_completions(ctx); flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
return 1; return 1;
} }
...@@ -2932,6 +2937,9 @@ static int handle_ir_buffer_fill(struct context *context, ...@@ -2932,6 +2937,9 @@ static int handle_ir_buffer_fill(struct context *context,
completed, DMA_FROM_DEVICE); completed, DMA_FROM_DEVICE);
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
trace_isoc_inbound_multiple_completions(&ctx->base, completed,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
ctx->base.callback.mc(&ctx->base, ctx->base.callback.mc(&ctx->base,
buffer_dma + completed, buffer_dma + completed,
ctx->base.callback_data); ctx->base.callback_data);
...@@ -2948,6 +2956,9 @@ static void flush_ir_buffer_fill(struct iso_context *ctx) ...@@ -2948,6 +2956,9 @@ static void flush_ir_buffer_fill(struct iso_context *ctx)
ctx->mc_buffer_bus & ~PAGE_MASK, ctx->mc_buffer_bus & ~PAGE_MASK,
ctx->mc_completed, DMA_FROM_DEVICE); ctx->mc_completed, DMA_FROM_DEVICE);
trace_isoc_inbound_multiple_completions(&ctx->base, ctx->mc_completed,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
ctx->base.callback.mc(&ctx->base, ctx->base.callback.mc(&ctx->base,
ctx->mc_buffer_bus + ctx->mc_completed, ctx->mc_buffer_bus + ctx->mc_completed,
ctx->base.callback_data); ctx->base.callback_data);
...@@ -3012,7 +3023,7 @@ static int handle_it_packet(struct context *context, ...@@ -3012,7 +3023,7 @@ static int handle_it_packet(struct context *context,
if (ctx->header_length + 4 > PAGE_SIZE) { if (ctx->header_length + 4 > PAGE_SIZE) {
if (ctx->base.drop_overflow_headers) if (ctx->base.drop_overflow_headers)
return 1; return 1;
flush_iso_completions(ctx); flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW);
} }
ctx_hdr = ctx->header + ctx->header_length; ctx_hdr = ctx->header + ctx->header_length;
...@@ -3023,7 +3034,7 @@ static int handle_it_packet(struct context *context, ...@@ -3023,7 +3034,7 @@ static int handle_it_packet(struct context *context,
ctx->header_length += 4; ctx->header_length += 4;
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
flush_iso_completions(ctx); flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ);
return 1; return 1;
} }
...@@ -3588,7 +3599,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base) ...@@ -3588,7 +3599,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
case FW_ISO_CONTEXT_TRANSMIT: case FW_ISO_CONTEXT_TRANSMIT:
case FW_ISO_CONTEXT_RECEIVE: case FW_ISO_CONTEXT_RECEIVE:
if (ctx->header_length != 0) if (ctx->header_length != 0)
flush_iso_completions(ctx); flush_iso_completions(ctx, FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH);
break; break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
if (ctx->mc_completed != 0) if (ctx->mc_completed != 0)
......
...@@ -821,6 +821,84 @@ TRACE_EVENT_CONDITION(isoc_inbound_multiple_queue, ...@@ -821,6 +821,84 @@ TRACE_EVENT_CONDITION(isoc_inbound_multiple_queue,
#undef TP_STRUCT__entry_iso_packet #undef TP_STRUCT__entry_iso_packet
#undef TP_fast_assign_iso_packet #undef TP_fast_assign_iso_packet
#ifndef show_cause
/*
 * Why the completion callback of an isochronous context was invoked.
 * Guarded by #ifndef so that repeated inclusion of this trace header
 * does not redefine the enum and the helper macro.
 */
enum fw_iso_context_completions_cause {
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH = 0,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ,
FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW,
};
/* Render the cause as a symbolic name in trace output. */
#define show_cause(cause) \
__print_symbolic(cause, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH, "FLUSH" }, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_IRQ, "IRQ" }, \
{ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW, "HEADER_OVERFLOW" } \
)
#endif
/*
 * Event class shared by the single-channel completion events (inbound
 * single and outbound). Records the context pointer, the card index, the
 * cycle timestamp, the cause of the completion (see show_cause), and a
 * copy of the packet headers accumulated so far, stored as quadlets.
 *
 * Fix versus the original: the TP_printk format said "timestap" instead
 * of "timestamp", and the macro invocation lacked its closing semicolon.
 */
DECLARE_EVENT_CLASS(isoc_single_completions_template,
	TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
	TP_ARGS(ctx, timestamp, cause, header, header_length),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
		__field(u16, timestamp)
		__field(u8, cause)
		__dynamic_array(u32, header, header_length / QUADLET_SIZE)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
		__entry->timestamp = timestamp;
		__entry->cause = cause;
		memcpy(__get_dynamic_array(header), header, __get_dynamic_array_len(header));
	),
	TP_printk(
		"context=0x%llx card_index=%u timestamp=0x%04x cause=%s header=%s",
		__entry->context,
		__entry->card_index,
		__entry->timestamp,
		show_cause(__entry->cause),
		__print_array(__get_dynamic_array(header),
			      __get_dynamic_array_len(header) / QUADLET_SIZE, QUADLET_SIZE)
	)
);
/* Fires only for transmit contexts; shares the single-completion layout. */
DEFINE_EVENT_CONDITION(isoc_single_completions_template, isoc_outbound_completions,
TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
TP_ARGS(ctx, timestamp, cause, header, header_length),
TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
);
/* Fires only for single-channel receive contexts; same layout as above. */
DEFINE_EVENT_CONDITION(isoc_single_completions_template, isoc_inbound_single_completions,
TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
TP_ARGS(ctx, timestamp, cause, header, header_length),
TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
);
/*
 * Traced when the completion callback of a multichannel receive context
 * runs. Records the context pointer, the card index, the completed amount
 * within the buffer (as passed to the .mc callback), and the cause.
 *
 * Fix versus the original: the TP_printk format said "comleted" instead
 * of "completed".
 */
TRACE_EVENT(isoc_inbound_multiple_completions,
	TP_PROTO(const struct fw_iso_context *ctx, unsigned int completed, enum fw_iso_context_completions_cause cause),
	TP_ARGS(ctx, completed, cause),
	TP_STRUCT__entry(
		__field(u64, context)
		__field(u8, card_index)
		__field(u16, completed)
		__field(u8, cause)
	),
	TP_fast_assign(
		__entry->context = (uintptr_t)ctx;
		__entry->card_index = ctx->card->index;
		__entry->completed = completed;
		__entry->cause = cause;
	),
	TP_printk(
		"context=0x%llx card_index=%u completed=%u cause=%s",
		__entry->context,
		__entry->card_index,
		__entry->completed,
		show_cause(__entry->cause)
	)
);
#undef QUADLET_SIZE #undef QUADLET_SIZE
#endif // _FIREWIRE_TRACE_EVENT_H #endif // _FIREWIRE_TRACE_EVENT_H
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment