Commit 4ac13294 authored by Thomas Gleixner, committed by Ingo Molnar

perf counters: protect them against CSTATE transitions

Impact: fix rare lost events problem

There are CPUs whose performance counters misbehave on CSTATE transitions,
so provide a way to disable and re-enable them around deep idle methods.

(hw_perf_enable_all() is cheap on x86.)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 43874d23
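For context (not part of the patch): on Intel CPUs with architectural perfmon,
MSR_CORE_PERF_GLOBAL_CTRL carries one enable bit per counter, so a single MSR
write stops every counter at once. A minimal sketch of the save/disable/restore
idiom this commit introduces, using the kernel's rdmsrl()/wrmsr() helpers the
same way the new functions do; the actual idle entry is elided:

	u64 ctrl;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);	/* save the enabled-counter set */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);		/* stop all counters */

	/* ... enter the deep C-state; counters stay quiesced ... */

	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);	/* re-enable only what was running */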
@@ -12,6 +12,7 @@
 #include <linux/notifier.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <linux/module.h>
 #include <linux/kdebug.h>
 #include <linux/sched.h>
@@ -119,10 +120,21 @@ void hw_perf_enable_all(void)
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
 }
 
-void hw_perf_disable_all(void)
+void hw_perf_restore_ctrl(u64 ctrl)
+{
+	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
+}
+EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);
+
+u64 hw_perf_disable_all(void)
 {
+	u64 ctrl;
+
+	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
+	return ctrl;
 }
+EXPORT_SYMBOL_GPL(hw_perf_disable_all);
 
 static inline void
 __hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
...
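Note the shape of the new API: hw_perf_disable_all() hands back the previous
GLOBAL_CTRL value rather than assuming perf_counter_mask, so the restore path
re-enables exactly the set of counters that was active before idle. Both
helpers are EXPORT_SYMBOL_GPL so modular idle code can use them. A hedged
illustration (the value is hypothetical):

	/* Suppose only counter 0 was running when we went idle: */
	u64 ctrl = hw_perf_disable_all();	/* returns e.g. 0x1 */
	/* ... idle ... */
	hw_perf_restore_ctrl(ctrl);		/* re-enables counter 0 only;
						 * hw_perf_enable_all() would also
						 * switch on counters that were
						 * deliberately left disabled */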
@@ -270,8 +270,11 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
+	u64 pctrl;
+
 	/* Don't trace irqs off for idle */
 	stop_critical_timings();
+	pctrl = hw_perf_disable_all();
 	if (cstate->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cstate);
@@ -284,6 +287,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
+	hw_perf_restore_ctrl(pctrl);
 	start_critical_timings();
 }
 #endif /* !CONFIG_CPU_IDLE */
@@ -1425,8 +1429,11 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
  */
 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
+	u64 pctrl;
+
 	/* Don't trace irqs off for idle */
 	stop_critical_timings();
+	pctrl = hw_perf_disable_all();
 	if (cx->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cx);
@@ -1441,6 +1448,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
+	hw_perf_restore_ctrl(pctrl);
 	start_critical_timings();
 }
...
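The same bracket would apply to any other deep-idle entry point: disable after
stop_critical_timings(), restore before start_critical_timings(), keeping the
counters-off window as small as possible. A minimal sketch with a hypothetical
driver function, safe_halt() standing in for the real entry method:

static void my_deep_idle_enter(void)		/* hypothetical example */
{
	u64 pctrl;

	stop_critical_timings();		/* don't trace irqs-off for idle */
	pctrl = hw_perf_disable_all();		/* counters off across the C-state */
	safe_halt();				/* sti; hlt -- enter idle */
	hw_perf_restore_ctrl(pctrl);		/* back to the previous counter set */
	start_critical_timings();
}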
@@ -156,6 +156,8 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern void perf_counter_init_task(struct task_struct *task);
 extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
+extern void hw_perf_restore_ctrl(u64 ctrl);
+extern u64 hw_perf_disable_all(void);
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
@@ -166,6 +168,8 @@ perf_counter_task_tick(struct task_struct *task, int cpu) { }
 static inline void perf_counter_init_task(struct task_struct *task) { }
 static inline void perf_counter_notify(struct pt_regs *regs) { }
 static inline void perf_counter_print_debug(void) { }
+static inline void hw_perf_restore_ctrl(u64 ctrl) { }
+static inline u64 hw_perf_disable_all(void) { return 0; }
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */
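The header follows the usual kernel stub pattern: with CONFIG_PERF_COUNTERS
disabled, both helpers collapse to empty inlines, so callers such as the ACPI
idle paths above need no #ifdefs. A sketch of a call site that compiles either
way (the function name is illustrative):

static void idle_bracket_example(void)		/* hypothetical */
{
	u64 ctrl = hw_perf_disable_all();	/* stub returns 0 when perf
						 * counters are configured out */
	/* ... idle ... */
	hw_perf_restore_ctrl(ctrl);		/* stub is a no-op */
}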