Commit a9a931e2 authored by Kan Liang's avatar Kan Liang Committed by Peter Zijlstra

perf: Use sample_flags for branch stack

Use the new sample_flags to indicate whether the branch stack is filled
by the PMU driver.

Remove the br_stack from the perf_sample_data_init() to minimize the number
of cache lines touched.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220901130959.1285717-4-kan.liang@linux.intel.com
parent 47a3aeb3
...@@ -2297,6 +2297,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, ...@@ -2297,6 +2297,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
cpuhw = this_cpu_ptr(&cpu_hw_events); cpuhw = this_cpu_ptr(&cpu_hw_events);
power_pmu_bhrb_read(event, cpuhw); power_pmu_bhrb_read(event, cpuhw);
data.br_stack = &cpuhw->bhrb_stack; data.br_stack = &cpuhw->bhrb_stack;
data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
} }
if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
......
...@@ -929,8 +929,10 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) ...@@ -929,8 +929,10 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
if (!x86_perf_event_set_period(event)) if (!x86_perf_event_set_period(event))
continue; continue;
if (has_branch_stack(event)) if (has_branch_stack(event)) {
data.br_stack = &cpuc->lbr_stack; data.br_stack = &cpuc->lbr_stack;
data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
if (perf_event_overflow(event, &data, regs)) if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0); x86_pmu_stop(event, 0);
......
...@@ -1714,8 +1714,10 @@ int x86_pmu_handle_irq(struct pt_regs *regs) ...@@ -1714,8 +1714,10 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
perf_sample_data_init(&data, 0, event->hw.last_period); perf_sample_data_init(&data, 0, event->hw.last_period);
if (has_branch_stack(event)) if (has_branch_stack(event)) {
data.br_stack = &cpuc->lbr_stack; data.br_stack = &cpuc->lbr_stack;
data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
if (perf_event_overflow(event, &data, regs)) if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0); x86_pmu_stop(event, 0);
......
...@@ -2995,8 +2995,10 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) ...@@ -2995,8 +2995,10 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
perf_sample_data_init(&data, 0, event->hw.last_period); perf_sample_data_init(&data, 0, event->hw.last_period);
if (has_branch_stack(event)) if (has_branch_stack(event)) {
data.br_stack = &cpuc->lbr_stack; data.br_stack = &cpuc->lbr_stack;
data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
if (perf_event_overflow(event, &data, regs)) if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0); x86_pmu_stop(event, 0);
......
...@@ -1640,8 +1640,10 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event, ...@@ -1640,8 +1640,10 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
data->sample_flags |= PERF_SAMPLE_TIME; data->sample_flags |= PERF_SAMPLE_TIME;
} }
if (has_branch_stack(event)) if (has_branch_stack(event)) {
data->br_stack = &cpuc->lbr_stack; data->br_stack = &cpuc->lbr_stack;
data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
} }
static void adaptive_pebs_save_regs(struct pt_regs *regs, static void adaptive_pebs_save_regs(struct pt_regs *regs,
...@@ -1791,6 +1793,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, ...@@ -1791,6 +1793,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
if (has_branch_stack(event)) { if (has_branch_stack(event)) {
intel_pmu_store_pebs_lbrs(lbr); intel_pmu_store_pebs_lbrs(lbr);
data->br_stack = &cpuc->lbr_stack; data->br_stack = &cpuc->lbr_stack;
data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
} }
} }
......
...@@ -1011,7 +1011,6 @@ struct perf_sample_data { ...@@ -1011,7 +1011,6 @@ struct perf_sample_data {
u64 sample_flags; u64 sample_flags;
u64 addr; u64 addr;
struct perf_raw_record *raw; struct perf_raw_record *raw;
struct perf_branch_stack *br_stack;
u64 period; u64 period;
union perf_sample_weight weight; union perf_sample_weight weight;
u64 txn; u64 txn;
...@@ -1021,6 +1020,8 @@ struct perf_sample_data { ...@@ -1021,6 +1020,8 @@ struct perf_sample_data {
* The other fields, optionally {set,used} by * The other fields, optionally {set,used} by
* perf_{prepare,output}_sample(). * perf_{prepare,output}_sample().
*/ */
struct perf_branch_stack *br_stack;
u64 type; u64 type;
u64 ip; u64 ip;
struct { struct {
...@@ -1061,7 +1062,6 @@ static inline void perf_sample_data_init(struct perf_sample_data *data, ...@@ -1061,7 +1062,6 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
data->sample_flags = 0; data->sample_flags = 0;
data->addr = addr; data->addr = addr;
data->raw = NULL; data->raw = NULL;
data->br_stack = NULL;
data->period = period; data->period = period;
data->weight.full = 0; data->weight.full = 0;
data->data_src.val = PERF_MEM_NA; data->data_src.val = PERF_MEM_NA;
......
...@@ -7052,7 +7052,7 @@ void perf_output_sample(struct perf_output_handle *handle, ...@@ -7052,7 +7052,7 @@ void perf_output_sample(struct perf_output_handle *handle,
} }
if (sample_type & PERF_SAMPLE_BRANCH_STACK) { if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
if (data->br_stack) { if (data->sample_flags & PERF_SAMPLE_BRANCH_STACK) {
size_t size; size_t size;
size = data->br_stack->nr size = data->br_stack->nr
...@@ -7358,7 +7358,7 @@ void perf_prepare_sample(struct perf_event_header *header, ...@@ -7358,7 +7358,7 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_BRANCH_STACK) { if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
int size = sizeof(u64); /* nr */ int size = sizeof(u64); /* nr */
if (data->br_stack) { if (data->sample_flags & PERF_SAMPLE_BRANCH_STACK) {
if (perf_sample_save_hw_index(event)) if (perf_sample_save_hw_index(event))
size += sizeof(u64); size += sizeof(u64);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment