Commit 6e6804d2 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf/core: Simplify perf_event_groups_for_each()

The last argument is, and always must be, the same.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Dmitri Prokhorov <Dmitry.Prohorov@intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valery Cherepennikov <valery.cherepennikov@intel.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6668128a
@@ -1642,11 +1642,11 @@ perf_event_groups_rotate(struct perf_event_groups *groups, int cpu)
 /*
  * Iterate through the whole groups tree.
  */
-#define perf_event_groups_for_each(event, groups, node)		\
-	for (event = rb_entry_safe(rb_first(&((groups)->tree)),	\
-				typeof(*event), node); event;	\
-		event = rb_entry_safe(rb_next(&event->node),	\
-				typeof(*event), node))
+#define perf_event_groups_for_each(event, groups)		\
+	for (event = rb_entry_safe(rb_first(&((groups)->tree)),	\
+				typeof(*event), group_node); event;	\
+		event = rb_entry_safe(rb_next(&event->group_node),	\
+				typeof(*event), group_node))
 /*
  * Add a event from the lists for its context.
@@ -11345,7 +11345,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 	 * We dont have to disable NMIs - we are only looking at
 	 * the list, not manipulating it:
 	 */
-	perf_event_groups_for_each(event, &parent_ctx->pinned_groups, group_node) {
+	perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
@@ -11361,7 +11361,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
 	parent_ctx->rotate_disable = 1;
 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

-	perf_event_groups_for_each(event, &parent_ctx->flexible_groups, group_node) {
+	perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment