Commit 7eb709f2 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Thomas Gleixner

perf: Fix sibling iteration

Mark noticed that the change to sibling_list changed some iteration
semantics; because previously we used group_entry as list entry,
sibling events would always have an empty sibling_list.

But because we now use sibling_list for both list head and list entry,
siblings will report as having siblings.

Fix this with a custom for_each_sibling_event() iterator.

Fixes: 8343aae6 ("perf/core: Remove perf_event::group_entry")
Reported-by: Mark Rutland <mark.rutland@arm.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: vincent.weaver@maine.edu
Cc: alexander.shishkin@linux.intel.com
Cc: torvalds@linux-foundation.org
Cc: alexey.budankov@linux.intel.com
Cc: valery.cherepennikov@intel.com
Cc: eranian@google.com
Cc: acme@redhat.com
Cc: linux-tip-commits@vger.kernel.org
Cc: davidcc@google.com
Cc: kan.liang@intel.com
Cc: Dmitry.Prohorov@intel.com
Cc: jolsa@redhat.com
Link: https://lkml.kernel.org/r/20180315170129.GX4043@hirez.programming.kicks-ass.net
parent 32ff77e8
...@@ -351,7 +351,7 @@ static int collect_events(struct perf_event *group, int max_count, ...@@ -351,7 +351,7 @@ static int collect_events(struct perf_event *group, int max_count,
evtype[n] = group->hw.event_base; evtype[n] = group->hw.event_base;
current_idx[n++] = PMC_NO_INDEX; current_idx[n++] = PMC_NO_INDEX;
} }
list_for_each_entry(pe, &group->sibling_list, sibling_list) { for_each_sibling_event(pe, group) {
if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) { if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count) if (n >= max_count)
return -1; return -1;
......
...@@ -269,7 +269,7 @@ static bool mmdc_pmu_group_is_valid(struct perf_event *event) ...@@ -269,7 +269,7 @@ static bool mmdc_pmu_group_is_valid(struct perf_event *event)
return false; return false;
} }
list_for_each_entry(sibling, &leader->sibling_list, sibling_list) { for_each_sibling_event(sibling, leader) {
if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask)) if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
return false; return false;
} }
......
...@@ -293,7 +293,7 @@ static bool l2x0_pmu_group_is_valid(struct perf_event *event) ...@@ -293,7 +293,7 @@ static bool l2x0_pmu_group_is_valid(struct perf_event *event)
else if (!is_software_event(leader)) else if (!is_software_event(leader))
return false; return false;
list_for_each_entry(sibling, &leader->sibling_list, sibling_list) { for_each_sibling_event(sibling, leader) {
if (sibling->pmu == pmu) if (sibling->pmu == pmu)
num_hw++; num_hw++;
else if (!is_software_event(sibling)) else if (!is_software_event(sibling))
......
...@@ -711,7 +711,7 @@ static int validate_group(struct perf_event *event) ...@@ -711,7 +711,7 @@ static int validate_group(struct perf_event *event)
if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
return -EINVAL; return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, sibling_list) { for_each_sibling_event(sibling, leader) {
if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
return -EINVAL; return -EINVAL;
} }
......
...@@ -1426,7 +1426,7 @@ static int collect_events(struct perf_event *group, int max_count, ...@@ -1426,7 +1426,7 @@ static int collect_events(struct perf_event *group, int max_count,
flags[n] = group->hw.event_base; flags[n] = group->hw.event_base;
events[n++] = group->hw.config; events[n++] = group->hw.config;
} }
list_for_each_entry(event, &group->sibling_list, sibling_list) { for_each_sibling_event(event, group) {
if (event->pmu->task_ctx_nr == perf_hw_context && if (event->pmu->task_ctx_nr == perf_hw_context &&
event->state != PERF_EVENT_STATE_OFF) { event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count) if (n >= max_count)
......
...@@ -277,7 +277,7 @@ static int collect_events(struct perf_event *group, int max_count, ...@@ -277,7 +277,7 @@ static int collect_events(struct perf_event *group, int max_count,
ctrs[n] = group; ctrs[n] = group;
n++; n++;
} }
list_for_each_entry(event, &group->sibling_list, sibling_list) { for_each_sibling_event(event, group) {
if (!is_software_event(event) && if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) { event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count) if (n >= max_count)
......
...@@ -1342,7 +1342,7 @@ static int collect_events(struct perf_event *group, int max_count, ...@@ -1342,7 +1342,7 @@ static int collect_events(struct perf_event *group, int max_count,
events[n] = group->hw.event_base; events[n] = group->hw.event_base;
current_idx[n++] = PIC_NO_INDEX; current_idx[n++] = PIC_NO_INDEX;
} }
list_for_each_entry(event, &group->sibling_list, sibling_list) { for_each_sibling_event(event, group) {
if (!is_software_event(event) && if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) { event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count) if (n >= max_count)
......
...@@ -990,7 +990,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, ...@@ -990,7 +990,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
if (!dogrp) if (!dogrp)
return n; return n;
list_for_each_entry(event, &leader->sibling_list, sibling_list) { for_each_sibling_event(event, leader) {
if (!is_x86_event(event) || if (!is_x86_event(event) ||
event->state <= PERF_EVENT_STATE_OFF) event->state <= PERF_EVENT_STATE_OFF)
continue; continue;
......
...@@ -354,7 +354,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, ...@@ -354,7 +354,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
if (!dogrp) if (!dogrp)
return n; return n;
list_for_each_entry(event, &leader->sibling_list, sibling_list) { for_each_sibling_event(event, leader) {
if (!is_box_event(box, event) || if (!is_box_event(box, event) ||
event->state <= PERF_EVENT_STATE_OFF) event->state <= PERF_EVENT_STATE_OFF)
continue; continue;
......
...@@ -1311,7 +1311,7 @@ validate_group(struct perf_event *event) ...@@ -1311,7 +1311,7 @@ validate_group(struct perf_event *event)
if (!validate_event(event->pmu, &fake_pmu, leader)) if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL; return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, sibling_list) { for_each_sibling_event(sibling, leader) {
if (!validate_event(event->pmu, &fake_pmu, sibling)) if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL; return -EINVAL;
} }
......
...@@ -846,11 +846,11 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) ...@@ -846,11 +846,11 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
!is_software_event(event->group_leader)) !is_software_event(event->group_leader))
return -EINVAL; return -EINVAL;
list_for_each_entry(sibling, &event->group_leader->sibling_list, for_each_sibling_event(sibling, event->group_leader) {
sibling_list)
if (sibling->pmu != event->pmu && if (sibling->pmu != event->pmu &&
!is_software_event(sibling)) !is_software_event(sibling))
return -EINVAL; return -EINVAL;
}
return 0; return 0;
} }
......
...@@ -536,7 +536,7 @@ static bool dsu_pmu_validate_group(struct perf_event *event) ...@@ -536,7 +536,7 @@ static bool dsu_pmu_validate_group(struct perf_event *event)
memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask)); memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader)) if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
return false; return false;
list_for_each_entry(sibling, &leader->sibling_list, sibling_list) { for_each_sibling_event(sibling, leader) {
if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling)) if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
return false; return false;
} }
......
...@@ -311,7 +311,7 @@ validate_group(struct perf_event *event) ...@@ -311,7 +311,7 @@ validate_group(struct perf_event *event)
if (!validate_event(event->pmu, &fake_pmu, leader)) if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL; return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, sibling_list) { for_each_sibling_event(sibling, leader) {
if (!validate_event(event->pmu, &fake_pmu, sibling)) if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL; return -EINVAL;
} }
......
...@@ -82,7 +82,7 @@ static bool hisi_validate_event_group(struct perf_event *event) ...@@ -82,7 +82,7 @@ static bool hisi_validate_event_group(struct perf_event *event)
counters++; counters++;
} }
list_for_each_entry(sibling, &event->group_leader->sibling_list, sibling_list) { for_each_sibling_event(sibling, event->group_leader) {
if (is_software_event(sibling)) if (is_software_event(sibling))
continue; continue;
if (sibling->pmu != event->pmu) if (sibling->pmu != event->pmu)
......
...@@ -534,8 +534,7 @@ static int l2_cache_event_init(struct perf_event *event) ...@@ -534,8 +534,7 @@ static int l2_cache_event_init(struct perf_event *event)
return -EINVAL; return -EINVAL;
} }
list_for_each_entry(sibling, &event->group_leader->sibling_list, for_each_sibling_event(sibling, event->group_leader) {
sibling_list)
if (sibling->pmu != event->pmu && if (sibling->pmu != event->pmu &&
!is_software_event(sibling)) { !is_software_event(sibling)) {
dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
...@@ -571,8 +570,7 @@ static int l2_cache_event_init(struct perf_event *event) ...@@ -571,8 +570,7 @@ static int l2_cache_event_init(struct perf_event *event)
return -EINVAL; return -EINVAL;
} }
list_for_each_entry(sibling, &event->group_leader->sibling_list, for_each_sibling_event(sibling, event->group_leader) {
sibling_list) {
if ((sibling != event) && if ((sibling != event) &&
!is_software_event(sibling) && !is_software_event(sibling) &&
(L2_EVT_GROUP(sibling->attr.config) == (L2_EVT_GROUP(sibling->attr.config) ==
......
...@@ -468,7 +468,7 @@ static bool qcom_l3_cache__validate_event_group(struct perf_event *event) ...@@ -468,7 +468,7 @@ static bool qcom_l3_cache__validate_event_group(struct perf_event *event)
counters = event_num_counters(event); counters = event_num_counters(event);
counters += event_num_counters(leader); counters += event_num_counters(leader);
list_for_each_entry(sibling, &leader->sibling_list, sibling_list) { for_each_sibling_event(sibling, leader) {
if (is_software_event(sibling)) if (is_software_event(sibling))
continue; continue;
if (sibling->pmu != event->pmu) if (sibling->pmu != event->pmu)
......
...@@ -949,11 +949,11 @@ static int xgene_perf_event_init(struct perf_event *event) ...@@ -949,11 +949,11 @@ static int xgene_perf_event_init(struct perf_event *event)
!is_software_event(event->group_leader)) !is_software_event(event->group_leader))
return -EINVAL; return -EINVAL;
list_for_each_entry(sibling, &event->group_leader->sibling_list, for_each_sibling_event(sibling, event->group_leader) {
sibling_list)
if (sibling->pmu != event->pmu && if (sibling->pmu != event->pmu &&
!is_software_event(sibling)) !is_software_event(sibling))
return -EINVAL; return -EINVAL;
}
return 0; return 0;
} }
......
...@@ -536,6 +536,10 @@ struct pmu_event_list { ...@@ -536,6 +536,10 @@ struct pmu_event_list {
struct list_head list; struct list_head list;
}; };
#define for_each_sibling_event(sibling, event) \
if ((event)->group_leader == (event)) \
list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
/** /**
* struct perf_event - performance event kernel representation: * struct perf_event - performance event kernel representation:
*/ */
......
...@@ -643,7 +643,7 @@ static void perf_event_update_sibling_time(struct perf_event *leader) ...@@ -643,7 +643,7 @@ static void perf_event_update_sibling_time(struct perf_event *leader)
{ {
struct perf_event *sibling; struct perf_event *sibling;
list_for_each_entry(sibling, &leader->sibling_list, sibling_list) for_each_sibling_event(sibling, leader)
perf_event_update_time(sibling); perf_event_update_time(sibling);
} }
...@@ -1828,7 +1828,7 @@ static void perf_group_attach(struct perf_event *event) ...@@ -1828,7 +1828,7 @@ static void perf_group_attach(struct perf_event *event)
perf_event__header_size(group_leader); perf_event__header_size(group_leader);
list_for_each_entry(pos, &group_leader->sibling_list, sibling_list) for_each_sibling_event(pos, group_leader)
perf_event__header_size(pos); perf_event__header_size(pos);
} }
...@@ -1928,7 +1928,7 @@ static void perf_group_detach(struct perf_event *event) ...@@ -1928,7 +1928,7 @@ static void perf_group_detach(struct perf_event *event)
out: out:
perf_event__header_size(event->group_leader); perf_event__header_size(event->group_leader);
list_for_each_entry(tmp, &event->group_leader->sibling_list, sibling_list) for_each_sibling_event(tmp, event->group_leader)
perf_event__header_size(tmp); perf_event__header_size(tmp);
} }
...@@ -1951,13 +1951,13 @@ static inline int __pmu_filter_match(struct perf_event *event) ...@@ -1951,13 +1951,13 @@ static inline int __pmu_filter_match(struct perf_event *event)
*/ */
static inline int pmu_filter_match(struct perf_event *event) static inline int pmu_filter_match(struct perf_event *event)
{ {
struct perf_event *child; struct perf_event *sibling;
if (!__pmu_filter_match(event)) if (!__pmu_filter_match(event))
return 0; return 0;
list_for_each_entry(child, &event->sibling_list, sibling_list) { for_each_sibling_event(sibling, event) {
if (!__pmu_filter_match(child)) if (!__pmu_filter_match(sibling))
return 0; return 0;
} }
...@@ -2031,7 +2031,7 @@ group_sched_out(struct perf_event *group_event, ...@@ -2031,7 +2031,7 @@ group_sched_out(struct perf_event *group_event,
/* /*
* Schedule out siblings (if any): * Schedule out siblings (if any):
*/ */
list_for_each_entry(event, &group_event->sibling_list, sibling_list) for_each_sibling_event(event, group_event)
event_sched_out(event, cpuctx, ctx); event_sched_out(event, cpuctx, ctx);
perf_pmu_enable(ctx->pmu); perf_pmu_enable(ctx->pmu);
...@@ -2310,7 +2310,7 @@ group_sched_in(struct perf_event *group_event, ...@@ -2310,7 +2310,7 @@ group_sched_in(struct perf_event *group_event,
/* /*
* Schedule in siblings as one group (if any): * Schedule in siblings as one group (if any):
*/ */
list_for_each_entry(event, &group_event->sibling_list, sibling_list) { for_each_sibling_event(event, group_event) {
if (event_sched_in(event, cpuctx, ctx)) { if (event_sched_in(event, cpuctx, ctx)) {
partial_group = event; partial_group = event;
goto group_error; goto group_error;
...@@ -2326,7 +2326,7 @@ group_sched_in(struct perf_event *group_event, ...@@ -2326,7 +2326,7 @@ group_sched_in(struct perf_event *group_event,
* partial group before returning: * partial group before returning:
* The events up to the failed event are scheduled out normally. * The events up to the failed event are scheduled out normally.
*/ */
list_for_each_entry(event, &group_event->sibling_list, sibling_list) { for_each_sibling_event(event, group_event) {
if (event == partial_group) if (event == partial_group)
break; break;
...@@ -3863,7 +3863,7 @@ static void __perf_event_read(void *info) ...@@ -3863,7 +3863,7 @@ static void __perf_event_read(void *info)
pmu->read(event); pmu->read(event);
list_for_each_entry(sub, &event->sibling_list, sibling_list) { for_each_sibling_event(sub, event) {
if (sub->state == PERF_EVENT_STATE_ACTIVE) { if (sub->state == PERF_EVENT_STATE_ACTIVE) {
/* /*
* Use sibling's PMU rather than @event's since * Use sibling's PMU rather than @event's since
...@@ -4711,7 +4711,7 @@ static int __perf_read_group_add(struct perf_event *leader, ...@@ -4711,7 +4711,7 @@ static int __perf_read_group_add(struct perf_event *leader,
if (read_format & PERF_FORMAT_ID) if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader); values[n++] = primary_event_id(leader);
list_for_each_entry(sub, &leader->sibling_list, sibling_list) { for_each_sibling_event(sub, leader) {
values[n++] += perf_event_count(sub); values[n++] += perf_event_count(sub);
if (read_format & PERF_FORMAT_ID) if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub); values[n++] = primary_event_id(sub);
...@@ -4905,7 +4905,7 @@ static void perf_event_for_each(struct perf_event *event, ...@@ -4905,7 +4905,7 @@ static void perf_event_for_each(struct perf_event *event,
event = event->group_leader; event = event->group_leader;
perf_event_for_each_child(event, func); perf_event_for_each_child(event, func);
list_for_each_entry(sibling, &event->sibling_list, sibling_list) for_each_sibling_event(sibling, event)
perf_event_for_each_child(sibling, func); perf_event_for_each_child(sibling, func);
} }
...@@ -6077,7 +6077,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, ...@@ -6077,7 +6077,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
__output_copy(handle, values, n * sizeof(u64)); __output_copy(handle, values, n * sizeof(u64));
list_for_each_entry(sub, &leader->sibling_list, sibling_list) { for_each_sibling_event(sub, leader) {
n = 0; n = 0;
if ((sub != event) && if ((sub != event) &&
...@@ -10662,8 +10662,7 @@ SYSCALL_DEFINE5(perf_event_open, ...@@ -10662,8 +10662,7 @@ SYSCALL_DEFINE5(perf_event_open,
perf_remove_from_context(group_leader, 0); perf_remove_from_context(group_leader, 0);
put_ctx(gctx); put_ctx(gctx);
list_for_each_entry(sibling, &group_leader->sibling_list, for_each_sibling_event(sibling, group_leader) {
sibling_list) {
perf_remove_from_context(sibling, 0); perf_remove_from_context(sibling, 0);
put_ctx(gctx); put_ctx(gctx);
} }
...@@ -10684,8 +10683,7 @@ SYSCALL_DEFINE5(perf_event_open, ...@@ -10684,8 +10683,7 @@ SYSCALL_DEFINE5(perf_event_open,
* By installing siblings first we NO-OP because they're not * By installing siblings first we NO-OP because they're not
* reachable through the group lists. * reachable through the group lists.
*/ */
list_for_each_entry(sibling, &group_leader->sibling_list, for_each_sibling_event(sibling, group_leader) {
sibling_list) {
perf_event__state_init(sibling); perf_event__state_init(sibling);
perf_install_in_context(ctx, sibling, sibling->cpu); perf_install_in_context(ctx, sibling, sibling->cpu);
get_ctx(ctx); get_ctx(ctx);
...@@ -11324,7 +11322,7 @@ static int inherit_group(struct perf_event *parent_event, ...@@ -11324,7 +11322,7 @@ static int inherit_group(struct perf_event *parent_event,
* case inherit_event() will create individual events, similar to what * case inherit_event() will create individual events, similar to what
* perf_group_detach() would do anyway. * perf_group_detach() would do anyway.
*/ */
list_for_each_entry(sub, &parent_event->sibling_list, sibling_list) { for_each_sibling_event(sub, parent_event) {
child_ctr = inherit_event(sub, parent, parent_ctx, child_ctr = inherit_event(sub, parent, parent_ctx,
child, leader, child_ctx); child, leader, child_ctx);
if (IS_ERR(child_ctr)) if (IS_ERR(child_ctr))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment