Commit 04289bb9 authored by Ingo Molnar

perf counters: add support for group counters

Impact: add group counters

This patch adds the "counter groups" abstraction.

Groups of counters behave much like normal 'single' counters, with a
few semantic and behavioral extensions on top of that.

A counter group is created by opening a new counter whose group_fd file
descriptor parameter (in the open() syscall) points to another, already
existing counter: that counter becomes the group leader.
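
As an illustration of the group_fd convention, here is a minimal user-space
sketch. It is written against perf_event_open(2), the modern descendant of
the prototype syscall this patch extends, so the attribute struct, event
names and syscall number below come from today's ABI and are not part of
this commit:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

/* Open one hardware counter for the current task on any CPU. */
static int counter_open(unsigned long long config, int group_fd)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = config;

        /* pid = 0: this task, cpu = -1: any CPU, flags = 0 */
        return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
        /* group_fd == -1: create a new group with this counter as leader */
        int leader = counter_open(PERF_COUNT_HW_CPU_CYCLES, -1);
        /* group_fd == leader: attach this counter to the leader's group */
        int sibling = counter_open(PERF_COUNT_HW_INSTRUCTIONS, leader);

        if (leader < 0 || sibling < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* ... run the measured workload, then read() each fd ... */
        close(sibling);
        close(leader);
        return 0;
}

Because the leader and its siblings are scheduled onto the PMU as one unit,
the two counts above always cover exactly the same stretches of execution,
which is the main point of grouping them.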

Groups of counters are scheduled in and out as one atomic unit, and they
are also round-robin scheduled atomically.

Counters that are members of a group can also record events with an
(atomic) extended timestamp that covers all members of the group, if the
record type is set to PERF_RECORD_GROUP.
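
As a sketch of what such a record might look like from the consumer's side:
the perf_handle_group() change in the diff below stores one
(hw_event.type, counter value) pair of u64s for the interrupting counter,
then one pair per group sibling. The flat-array framing and the decoder
below are assumptions made for illustration only; this page does not show
how the buffer reaches user space.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical decoder: assumes the irq data is a flat sequence of u64s,
 * laid out as (event type, event count) pairs in the order in which
 * perf_store_irq_data() was called.
 */
static void dump_group_record(const uint64_t *data, size_t nr_u64s)
{
        size_t i;

        for (i = 0; i + 1 < nr_u64s; i += 2)
                printf("type %llu: count %llu\n",
                       (unsigned long long)data[i],
                       (unsigned long long)data[i + 1]);
}
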
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9f66a381
@@ -346,18 +346,22 @@ static void perf_save_and_restart(struct perf_counter *counter)
 }
 
 static void
-perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
+perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
 {
-        struct perf_counter_context *ctx = leader->ctx;
-        struct perf_counter *counter;
+        struct perf_counter *counter, *group_leader = sibling->group_leader;
         int bit;
 
-        list_for_each_entry(counter, &ctx->counters, list) {
-                if (counter->hw_event.record_type != PERF_RECORD_SIMPLE ||
-                    counter == leader)
-                        continue;
-
-                if (counter->active) {
+        /*
+         * Store the counter's own timestamp first:
+         */
+        perf_store_irq_data(sibling, sibling->hw_event.type);
+        perf_store_irq_data(sibling, atomic64_counter_read(sibling));
+
+        /*
+         * Then store sibling timestamps (if any):
+         */
+        list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
+                if (!counter->active) {
                         /*
                          * When counter was not in the overflow mask, we have to
                          * read it from hardware. We read it as well, when it
@@ -371,8 +375,8 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
                                 perf_save_and_restart(counter);
                         }
                 }
-                perf_store_irq_data(leader, counter->hw_event.type);
-                perf_store_irq_data(leader, atomic64_counter_read(counter));
+                perf_store_irq_data(sibling, counter->hw_event.type);
+                perf_store_irq_data(sibling, atomic64_counter_read(counter));
         }
 }
@@ -416,10 +420,6 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
                         perf_store_irq_data(counter, instruction_pointer(regs));
                         break;
                 case PERF_RECORD_GROUP:
-                        perf_store_irq_data(counter,
-                                            counter->hw_event.type);
-                        perf_store_irq_data(counter,
-                                            atomic64_counter_read(counter));
                         perf_handle_group(counter, &status, &ack);
                         break;
                 }
@@ -117,7 +117,10 @@ struct perf_data {
  * struct perf_counter - performance counter kernel representation:
  */
 struct perf_counter {
-        struct list_head                list;
+        struct list_head                list_entry;
+        struct list_head                sibling_list;
+        struct perf_counter             *group_leader;
         int                             active;
 #if BITS_PER_LONG == 64
         atomic64_t                      count;
@@ -158,7 +161,8 @@ struct perf_counter_context {
          * Protect the list of counters:
          */
         spinlock_t              lock;
-        struct list_head        counters;
+        struct list_head        counter_list;
         int                     nr_counters;
         int                     nr_active;
         struct task_struct      *task;
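
The header hunks above only introduce the new linkage fields; the list
management that uses them lives in the part of the diff not reproduced on
this page. As a rough sketch of how the fields are presumably meant to fit
together (the helper name and its exact placement are assumptions, not
taken from this commit): a counter that is its own group_leader goes on the
context's counter_list, while a sibling hangs off its leader's
sibling_list, in both cases linked through list_entry.

#include <linux/list.h>

/* Hypothetical helper, for illustration only. */
static void sketch_attach_counter(struct perf_counter *counter,
                                  struct perf_counter_context *ctx)
{
        struct perf_counter *leader = counter->group_leader;

        if (leader == counter)
                /* stand-alone counter or group leader */
                list_add_tail(&counter->list_entry, &ctx->counter_list);
        else
                /* group sibling: lives on its leader's list */
                list_add_tail(&counter->list_entry, &leader->sibling_list);
}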