Commit eb55b455 authored by Namhyung Kim, committed by Ingo Molnar

perf/core: Add perf_sample_save_brstack() helper

When we save the branch stack to the perf sample data, we need to
update the sample flags and the dynamic size.  To make sure this is
done consistently, add the perf_sample_save_brstack() helper and
convert all call sites.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230118060559.615653-5-namhyung@kernel.org
parent 0a9081cf
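
For context, a minimal before/after sketch of the call-site conversion this commit performs (not part of the commit itself; the field and helper names are taken from the diff below):

/* Before: each PMU driver saved the branch stack by hand and had to
 * remember to set both the pointer and the sample flag, while the
 * dynamic sample size was fixed up separately in perf_prepare_sample().
 */
if (has_branch_stack(event)) {
	data.br_stack = &cpuc->lbr_stack;
	data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}

/* After: one helper records the pointer, grows data->dyn_size by the
 * space the branch stack occupies in the sample (the u64 'nr' count,
 * an optional u64 hw_index, and nr branch entries), and sets
 * PERF_SAMPLE_BRANCH_STACK in one place.
 */
if (has_branch_stack(event))
	perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);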
@@ -2313,8 +2313,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 			struct cpu_hw_events *cpuhw;
 			cpuhw = this_cpu_ptr(&cpu_hw_events);
 			power_pmu_bhrb_read(event, cpuhw);
-			data.br_stack = &cpuhw->bhrb_stack;
-			data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
+			perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack);
 		}
 
 		if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
...
@@ -928,10 +928,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 		if (!x86_perf_event_set_period(event))
 			continue;
 
-		if (has_branch_stack(event)) {
-			data.br_stack = &cpuc->lbr_stack;
-			data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
-		}
+		if (has_branch_stack(event))
+			perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
 
 		if (perf_event_overflow(event, &data, regs))
 			x86_pmu_stop(event, 0);
...
@@ -3036,10 +3036,8 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 
 		perf_sample_data_init(&data, 0, event->hw.last_period);
 
-		if (has_branch_stack(event)) {
-			data.br_stack = &cpuc->lbr_stack;
-			data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
-		}
+		if (has_branch_stack(event))
+			perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
 
 		if (perf_event_overflow(event, &data, regs))
 			x86_pmu_stop(event, 0);
...
@@ -1720,10 +1720,8 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
 		data->sample_flags |= PERF_SAMPLE_TIME;
 	}
 
-	if (has_branch_stack(event)) {
-		data->br_stack = &cpuc->lbr_stack;
-		data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
-	}
+	if (has_branch_stack(event))
+		perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
 }
 
 static void adaptive_pebs_save_regs(struct pt_regs *regs,
@@ -1883,8 +1881,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
 
 		if (has_branch_stack(event)) {
 			intel_pmu_store_pebs_lbrs(lbr);
-			data->br_stack = &cpuc->lbr_stack;
-			data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
+			perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
 		}
 	}
 
...
@@ -1102,6 +1102,31 @@ extern u64 perf_event_read_value(struct perf_event *event,
 
 extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
 
+static inline bool branch_sample_no_flags(const struct perf_event *event)
+{
+	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
+}
+
+static inline bool branch_sample_no_cycles(const struct perf_event *event)
+{
+	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
+}
+
+static inline bool branch_sample_type(const struct perf_event *event)
+{
+	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
+}
+
+static inline bool branch_sample_hw_index(const struct perf_event *event)
+{
+	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
+}
+
+static inline bool branch_sample_priv(const struct perf_event *event)
+{
+	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
+}
+
 struct perf_sample_data {
 	/*
@@ -1210,6 +1235,21 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
 	data->sample_flags |= PERF_SAMPLE_RAW;
 }
 
+static inline void perf_sample_save_brstack(struct perf_sample_data *data,
+					    struct perf_event *event,
+					    struct perf_branch_stack *brs)
+{
+	int size = sizeof(u64); /* nr */
+
+	if (branch_sample_hw_index(event))
+		size += sizeof(u64);
+	size += brs->nr * sizeof(struct perf_branch_entry);
+
+	data->br_stack = brs;
+	data->dyn_size += size;
+	data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
+}
+
 /*
  * Clear all bitfields in the perf_branch_entry.
  * The to and from fields are not cleared because they are
@@ -1827,30 +1867,4 @@ static inline void perf_lopwr_cb(bool mode)
 }
 #endif
 
-#ifdef CONFIG_PERF_EVENTS
-static inline bool branch_sample_no_flags(const struct perf_event *event)
-{
-	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
-}
-
-static inline bool branch_sample_no_cycles(const struct perf_event *event)
-{
-	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
-}
-
-static inline bool branch_sample_type(const struct perf_event *event)
-{
-	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
-}
-
-static inline bool branch_sample_hw_index(const struct perf_event *event)
-{
-	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
-}
-
-static inline bool branch_sample_priv(const struct perf_event *event)
-{
-	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
-}
-#endif /* CONFIG_PERF_EVENTS */
 #endif /* _LINUX_PERF_EVENT_H */
@@ -7310,7 +7310,7 @@ void perf_output_sample(struct perf_output_handle *handle,
 	}
 
 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
-		if (data->sample_flags & PERF_SAMPLE_BRANCH_STACK) {
+		if (data->br_stack) {
 			size_t size;
 
 			size = data->br_stack->nr
@@ -7587,16 +7587,10 @@ void perf_prepare_sample(struct perf_event_header *header,
 		data->sample_flags |= PERF_SAMPLE_RAW;
 	}
 
-	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
-		int size = sizeof(u64); /* nr */
-
-		if (data->sample_flags & PERF_SAMPLE_BRANCH_STACK) {
-			if (branch_sample_hw_index(event))
-				size += sizeof(u64);
-
-			size += data->br_stack->nr
-			      * sizeof(struct perf_branch_entry);
-		}
-		data->dyn_size += size;
+	if (filtered_sample_type & PERF_SAMPLE_BRANCH_STACK) {
+		data->br_stack = NULL;
+		data->dyn_size += sizeof(u64);
+		data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
 	}
 
 	if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
...
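
As a worked example of the dyn_size accounting in the new helper: assuming sizeof(struct perf_branch_entry) is 24 bytes (two u64 addresses plus one u64 of flag bitfields) and a sample with 16 branch entries on an event that set PERF_SAMPLE_BRANCH_HW_INDEX, perf_sample_save_brstack() adds 8 (nr) + 8 (hw_index) + 16 * 24 = 400 bytes to data->dyn_size.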