Commit 194002b2 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter, x86: Add mmap counter read support

Update the mmap control page with the needed information to
use the userspace RDPMC instruction for self monitoring.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 7f8b4e4e
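
With this change in place, a self-monitoring task can read its counter without a syscall by combining the mmap'ed control page with RDPMC. The sketch below is illustrative only: the struct shows just the fields used here (layout assumed from the perf_counter mmap page of this series), and rdpmc_read()/read_self() are hypothetical helper names, not part of this patch. RDPMC from user mode additionally requires CR4.PCE to be set, which is outside the scope of this change.

#include <stdint.h>

/* Subset of the mmap control page; layout assumed, only fields used below. */
struct perf_counter_mmap_page {
	uint32_t version;
	uint32_t compat_version;
	uint32_t lock;		/* seqlock count, bumped before and after each update */
	uint32_t index;		/* hw idx + 1 - PERF_COUNTER_INDEX_OFFSET, 0 = unusable */
	int64_t  offset;	/* add to the raw hardware counter value */
};

/* Hypothetical helper: read x86 hardware counter 'idx' via RDPMC. */
static inline uint64_t rdpmc_read(uint32_t idx)
{
	uint32_t lo, hi;

	__asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

/*
 * Lock-free self-monitoring read; returns -1 when the counter is not
 * currently active on a hardware counter (the caller should then fall
 * back to read() on the counter fd).
 */
static int64_t read_self(volatile struct perf_counter_mmap_page *pc)
{
	uint32_t seq;
	int64_t count;

	for (;;) {
		seq = pc->lock;
		if (seq & 1)			/* kernel update in progress */
			continue;
		__asm__ volatile("" ::: "memory");

		if (pc->index == 0)
			return -1;		/* not active: use read() instead */

		count = pc->offset + (int64_t)rdpmc_read(pc->index - 1);

		__asm__ volatile("" ::: "memory");
		if (pc->lock == seq)		/* snapshot was consistent */
			return count;
	}
}

On x86, PERF_COUNTER_INDEX_OFFSET is 0, so userpg->index is hw.idx + 1 and zero unambiguously means "not active"; powerpc defines the offset as 1, presumably because its hardware counter numbering is already 1-based.
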
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -61,6 +61,8 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
+#define PERF_COUNTER_INDEX_OFFSET	1
+
 /*
  * Only override the default definitions in include/linux/perf_counter.h
  * if we have hardware PMU support.
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -87,6 +87,9 @@ union cpuid10_edx {
 #ifdef CONFIG_PERF_COUNTERS
 extern void init_hw_perf_counters(void);
 extern void perf_counters_lapic_init(void);
+
+#define PERF_COUNTER_INDEX_OFFSET	0
+
 #else
 static inline void init_hw_perf_counters(void)		{ }
 static inline void perf_counters_lapic_init(void)	{ }
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -912,6 +912,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	err = checking_wrmsrl(hwc->counter_base + idx,
 			     (u64)(-left) & x86_pmu.counter_mask);
 
+	perf_counter_update_userpage(counter);
+
 	return ret;
 }
@@ -1034,6 +1036,8 @@ static int x86_pmu_enable(struct perf_counter *counter)
 	x86_perf_counter_set_period(counter, hwc, idx);
 	x86_pmu.enable(hwc, idx);
 
+	perf_counter_update_userpage(counter);
+
 	return 0;
 }
@@ -1126,6 +1130,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	x86_perf_counter_update(counter, hwc, idx);
 	cpuc->counters[idx] = NULL;
 	clear_bit(idx, cpuc->used_mask);
+
+	perf_counter_update_userpage(counter);
 }
 
 /*
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1753,6 +1753,14 @@ int perf_counter_task_disable(void)
 	return 0;
 }
 
+static int perf_counter_index(struct perf_counter *counter)
+{
+	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+		return 0;
+
+	return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
+}
+
 /*
  * Callers need to ensure there can be no nesting of this function, otherwise
  * the seqlock logic goes bad. We can not serialize this because the arch
@@ -1777,7 +1785,7 @@ void perf_counter_update_userpage(struct perf_counter *counter)
 	preempt_disable();
 	++userpg->lock;
 	barrier();
-	userpg->index = counter->hw.idx;
+	userpg->index = perf_counter_index(counter);
 	userpg->offset = atomic64_read(&counter->count);
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		userpg->offset -= atomic64_read(&counter->hw.prev_count);