Commit 4beb31f3 authored by Frederic Weisbecker, committed by Ingo Molnar

perf: Split the per-cpu accounting part of the event accounting code

This way we can use the per-cpu handling separately.
This is going to be used to fix the event migration
code accounting.
Original-patch-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 766d6c07
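The point of the split is that a migration fix can pair the new per-cpu helpers across a CPU-context move: revert the per-cpu counts on the source CPU when an event is removed, and re-add them on the destination CPU when it is re-installed, without touching the global counts. Below is a minimal sketch of that usage, modeled on the existing perf_pmu_migrate_context(); the locking and list handling are abbreviated here, and this is an assumed shape for the follow-up change, not a quote of it:

static void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
	struct perf_event_context *src_ctx, *dst_ctx;
	struct perf_event *event, *tmp;
	LIST_HEAD(events);

	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;

	mutex_lock(&src_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &src_ctx->event_list, event_entry) {
		perf_remove_from_context(event);
		/* the event is leaving src_cpu: revert its per-cpu accounting */
		unaccount_event_cpu(event, src_cpu);
		list_add(&event->event_entry, &events);
	}
	mutex_unlock(&src_ctx->mutex);

	mutex_lock(&dst_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &events, event_entry) {
		list_del(&event->event_entry);
		/* the event now runs on dst_cpu: account it there */
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
	}
	mutex_unlock(&dst_ctx->mutex);
}

Note that the global state (nr_mmap_events, the perf_sched_events static key, and so on) is deliberately untouched by a migration, which is exactly why the per-cpu part has to be callable on its own.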
@@ -3128,6 +3128,40 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
+static void unaccount_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
+}
+
+static void unaccount_event(struct perf_event *event)
+{
+	if (event->parent)
+		return;
+
+	if (event->attach_state & PERF_ATTACH_TASK)
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (event->attr.mmap || event->attr.mmap_data)
+		atomic_dec(&nr_mmap_events);
+	if (event->attr.comm)
+		atomic_dec(&nr_comm_events);
+	if (event->attr.task)
+		atomic_dec(&nr_task_events);
+	if (is_cgroup_event(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (has_branch_stack(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+
+	unaccount_event_cpu(event, event->cpu);
+}
+
 static void __free_event(struct perf_event *event)
 {
 	if (!event->parent) {
@@ -3147,29 +3181,7 @@ static void free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
 
-	if (!event->parent) {
-		if (event->attach_state & PERF_ATTACH_TASK)
-			static_key_slow_dec_deferred(&perf_sched_events);
-		if (event->attr.mmap || event->attr.mmap_data)
-			atomic_dec(&nr_mmap_events);
-		if (event->attr.comm)
-			atomic_dec(&nr_comm_events);
-		if (event->attr.task)
-			atomic_dec(&nr_task_events);
-		if (is_cgroup_event(event)) {
-			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			static_key_slow_dec_deferred(&perf_sched_events);
-		}
-		if (has_branch_stack(event)) {
-			static_key_slow_dec_deferred(&perf_sched_events);
-			/* is system-wide event */
-			if (!(event->attach_state & PERF_ATTACH_TASK)) {
-				atomic_dec(&per_cpu(perf_branch_stack_events,
-						    event->cpu));
-			}
-		}
-	}
+	unaccount_event(event);
 
 	if (event->rb) {
 		struct ring_buffer *rb;
@@ -6451,8 +6463,24 @@ struct pmu *perf_init_event(struct perf_event *event)
 	return pmu;
 }
 
+static void account_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
+}
+
 static void account_event(struct perf_event *event)
 {
+	if (event->parent)
+		return;
+
 	if (event->attach_state & PERF_ATTACH_TASK)
 		static_key_slow_inc(&perf_sched_events.key);
 	if (event->attr.mmap || event->attr.mmap_data)
@@ -6461,17 +6489,12 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
-	if (has_branch_stack(event)) {
+	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
-		if (!(event->attach_state & PERF_ATTACH_TASK))
-			atomic_inc(&per_cpu(perf_branch_stack_events,
-					    event->cpu));
-	}
-	if (is_cgroup_event(event)) {
-		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+	if (is_cgroup_event(event))
 		static_key_slow_inc(&perf_sched_events.key);
-	}
+
+	account_event_cpu(event, event->cpu);
 }
 
 /*