Commit 828f0afd authored by Don Zickus's avatar Don Zickus Committed by Andi Kleen

[PATCH] x86: Add performance counter reservation framework for UP kernels

Adds basic infrastructure to allow subsystems to reserve performance
counters on the x86 chips.  Only UP kernels are supported in this patch to
make reviewing easier.  The SMP portion makes a lot more changes.

Think of this as a locking mechanism where each bit represents a different
counter.  In addition, each subsystem should also reserve an appropriate
event selection register that will correspond to the performance counter it
will be using (this is mainly necessary for the Pentium 4 chips as they
break the 1:1 relationship to performance counters).

This will help prevent subsystems like oprofile from interfering with the
nmi watchdog.
Signed-off-by: default avatarDon Zickus <dzickus@redhat.com>
Signed-off-by: default avatarAndi Kleen <ak@suse.de>
parent b07f8915
...@@ -34,6 +34,20 @@ static unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */ ...@@ -34,6 +34,20 @@ static unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */
static unsigned int nmi_p4_cccr_val; static unsigned int nmi_p4_cccr_val;
extern void show_registers(struct pt_regs *regs); extern void show_registers(struct pt_regs *regs);
/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection registers
 * - different performance counters / event selection registers may be
 *   reserved for different subsystems; this reservation system just tries
 *   to coordinate things a little
 */
static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
/* /*
* lapic_nmi_owner tracks the ownership of the lapic NMI hardware: * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
* - it may be reserved by some other driver, or not * - it may be reserved by some other driver, or not
...@@ -95,6 +109,105 @@ int nmi_active; ...@@ -95,6 +109,105 @@ int nmi_active;
(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/*
	 * Map a performance-counter MSR address to its bit offset in the
	 * per-cpu perfctr_nmi_owner reservation mask.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return msr - MSR_K7_PERFCTR0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		if (boot_cpu_data.x86 == 6)		/* P6 family */
			return msr - MSR_P6_PERFCTR0;
		if (boot_cpu_data.x86 == 15)		/* Pentium 4 */
			return msr - MSR_P4_BPU_PERFCTR0;
	}

	/* unknown vendor/family: everything maps to bit 0 */
	return 0;
}
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/*
	 * Map an event-select MSR address to its bit offset in the
	 * per-cpu evntsel_nmi_owner reservation mask.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return msr - MSR_K7_EVNTSEL0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		if (boot_cpu_data.x86 == 6)		/* P6 family */
			return msr - MSR_P6_EVNTSEL0;
		if (boot_cpu_data.x86 == 15)		/* Pentium 4 ESCRs */
			return msr - MSR_P4_BSU_ESCR0;
	}

	/* unknown vendor/family: everything maps to bit 0 */
	return 0;
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	/*
	 * 'counter' is a raw bit index into the per-cpu ownership mask,
	 * not an MSR address.  Callers (e.g. oprofile) pass the offset
	 * directly.
	 */
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/* non-zero when the bit is still clear, i.e. the counter is free */
	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	/* translate the MSR address into its reservation bit offset */
	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/*
	 * Only a peek, no reservation is taken; a racing reserve can still
	 * win between this check and a later reserve_perfctr_nmi() call.
	 */
	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}
int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter = nmi_perfctr_msr_to_bit(msr);

	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/*
	 * test_and_set_bit() returns the previous bit value: 0 means the
	 * counter was free and is now ours, so report success (1).
	 */
	return !test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}
void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	/* translate the MSR address into its reservation bit offset */
	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/* drop ownership; no check that the caller actually held the bit */
	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}
int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter = nmi_evntsel_msr_to_bit(msr);

	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/*
	 * evntsel_nmi_owner is a per-cpu array of longs (P4 ESCR offsets
	 * exceed one word); the bitop operates on the array base.
	 * test_and_set_bit() returning 0 means the selector was free.
	 */
	return !test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
}
void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	/* translate the MSR address into its reservation bit offset */
	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/* drop ownership; no check that the caller actually held the bit */
	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when /* The performance counters used by NMI_LOCAL_APIC don't trigger when
* the CPU is idle. To make sure the NMI watchdog really ticks on all * the CPU is idle. To make sure the NMI watchdog really ticks on all
...@@ -344,14 +457,6 @@ late_initcall(init_lapic_nmi_sysfs); ...@@ -344,14 +457,6 @@ late_initcall(init_lapic_nmi_sysfs);
* Original code written by Keith Owens. * Original code written by Keith Owens.
*/ */
/* Zero 'n' consecutive MSRs starting at address 'base'. */
static void clear_msr_range(unsigned int base, unsigned int n)
{
	unsigned int i;

	for(i = 0; i < n; ++i)
		wrmsr(base+i, 0, 0);
}
static void write_watchdog_counter(const char *descr) static void write_watchdog_counter(const char *descr)
{ {
u64 count = (u64)cpu_khz * 1000; u64 count = (u64)cpu_khz * 1000;
...@@ -362,14 +467,19 @@ static void write_watchdog_counter(const char *descr) ...@@ -362,14 +467,19 @@ static void write_watchdog_counter(const char *descr)
wrmsrl(nmi_perfctr_msr, 0 - count); wrmsrl(nmi_perfctr_msr, 0 - count);
} }
static void setup_k7_watchdog(void) static int setup_k7_watchdog(void)
{ {
unsigned int evntsel; unsigned int evntsel;
nmi_perfctr_msr = MSR_K7_PERFCTR0; nmi_perfctr_msr = MSR_K7_PERFCTR0;
clear_msr_range(MSR_K7_EVNTSEL0, 4); if (!reserve_perfctr_nmi(nmi_perfctr_msr))
clear_msr_range(MSR_K7_PERFCTR0, 4); goto fail;
if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
goto fail1;
wrmsrl(MSR_K7_PERFCTR0, 0UL);
evntsel = K7_EVNTSEL_INT evntsel = K7_EVNTSEL_INT
| K7_EVNTSEL_OS | K7_EVNTSEL_OS
...@@ -381,16 +491,24 @@ static void setup_k7_watchdog(void) ...@@ -381,16 +491,24 @@ static void setup_k7_watchdog(void)
apic_write(APIC_LVTPC, APIC_DM_NMI); apic_write(APIC_LVTPC, APIC_DM_NMI);
evntsel |= K7_EVNTSEL_ENABLE; evntsel |= K7_EVNTSEL_ENABLE;
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
return 1;
fail1:
release_perfctr_nmi(nmi_perfctr_msr);
fail:
return 0;
} }
static void setup_p6_watchdog(void) static int setup_p6_watchdog(void)
{ {
unsigned int evntsel; unsigned int evntsel;
nmi_perfctr_msr = MSR_P6_PERFCTR0; nmi_perfctr_msr = MSR_P6_PERFCTR0;
clear_msr_range(MSR_P6_EVNTSEL0, 2); if (!reserve_perfctr_nmi(nmi_perfctr_msr))
clear_msr_range(MSR_P6_PERFCTR0, 2); goto fail;
if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0))
goto fail1;
evntsel = P6_EVNTSEL_INT evntsel = P6_EVNTSEL_INT
| P6_EVNTSEL_OS | P6_EVNTSEL_OS
...@@ -402,6 +520,11 @@ static void setup_p6_watchdog(void) ...@@ -402,6 +520,11 @@ static void setup_p6_watchdog(void)
apic_write(APIC_LVTPC, APIC_DM_NMI); apic_write(APIC_LVTPC, APIC_DM_NMI);
evntsel |= P6_EVNTSEL0_ENABLE; evntsel |= P6_EVNTSEL0_ENABLE;
wrmsr(MSR_P6_EVNTSEL0, evntsel, 0); wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
return 1;
fail1:
release_perfctr_nmi(nmi_perfctr_msr);
fail:
return 0;
} }
static int setup_p4_watchdog(void) static int setup_p4_watchdog(void)
...@@ -419,22 +542,11 @@ static int setup_p4_watchdog(void) ...@@ -419,22 +542,11 @@ static int setup_p4_watchdog(void)
nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1; nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
#endif #endif
if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL)) if (!reserve_perfctr_nmi(nmi_perfctr_msr))
clear_msr_range(0x3F1, 2); goto fail;
/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
docs doesn't fully define it, so leave it alone for now. */ if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
if (boot_cpu_data.x86_model >= 0x3) { goto fail1;
/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
clear_msr_range(0x3A0, 26);
clear_msr_range(0x3BC, 3);
} else {
clear_msr_range(0x3A0, 31);
}
clear_msr_range(0x3C0, 6);
clear_msr_range(0x3C8, 6);
clear_msr_range(0x3E0, 2);
clear_msr_range(MSR_P4_CCCR0, 18);
clear_msr_range(MSR_P4_PERFCTR0, 18);
wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0); wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0); wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
...@@ -442,6 +554,10 @@ static int setup_p4_watchdog(void) ...@@ -442,6 +554,10 @@ static int setup_p4_watchdog(void)
apic_write(APIC_LVTPC, APIC_DM_NMI); apic_write(APIC_LVTPC, APIC_DM_NMI);
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
return 1; return 1;
fail1:
release_perfctr_nmi(nmi_perfctr_msr);
fail:
return 0;
} }
void setup_apic_nmi_watchdog (void) void setup_apic_nmi_watchdog (void)
...@@ -450,7 +566,8 @@ void setup_apic_nmi_watchdog (void) ...@@ -450,7 +566,8 @@ void setup_apic_nmi_watchdog (void)
case X86_VENDOR_AMD: case X86_VENDOR_AMD:
if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15) if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
return; return;
setup_k7_watchdog(); if (!setup_k7_watchdog())
return;
break; break;
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
switch (boot_cpu_data.x86) { switch (boot_cpu_data.x86) {
...@@ -458,7 +575,8 @@ void setup_apic_nmi_watchdog (void) ...@@ -458,7 +575,8 @@ void setup_apic_nmi_watchdog (void)
if (boot_cpu_data.x86_model > 0xd) if (boot_cpu_data.x86_model > 0xd)
return; return;
setup_p6_watchdog(); if(!setup_p6_watchdog())
return;
break; break;
case 15: case 15:
if (boot_cpu_data.x86_model > 0x4) if (boot_cpu_data.x86_model > 0x4)
...@@ -612,6 +730,12 @@ int proc_unknown_nmi_panic(ctl_table *table, int write, struct file *file, ...@@ -612,6 +730,12 @@ int proc_unknown_nmi_panic(ctl_table *table, int write, struct file *file,
EXPORT_SYMBOL(nmi_active); EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog); EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(reserve_lapic_nmi); EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi); EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog); EXPORT_SYMBOL(disable_timer_nmi_watchdog);
......
...@@ -27,6 +27,20 @@ ...@@ -27,6 +27,20 @@
#include <asm/kdebug.h> #include <asm/kdebug.h>
#include <asm/mce.h> #include <asm/mce.h>
/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection registers
 * - different performance counters / event selection registers may be
 *   reserved for different subsystems; this reservation system just tries
 *   to coordinate things a little
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
/* /*
* lapic_nmi_owner tracks the ownership of the lapic NMI hardware: * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
* - it may be reserved by some other driver, or not * - it may be reserved by some other driver, or not
...@@ -90,6 +104,95 @@ static unsigned int nmi_p4_cccr_val; ...@@ -90,6 +104,95 @@ static unsigned int nmi_p4_cccr_val;
(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/*
	 * Map a performance-counter MSR address to its bit offset in the
	 * per-cpu perfctr_nmi_owner reservation mask.  On x86-64 the only
	 * supported Intel family is the Pentium 4, so no family switch.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return msr - MSR_K7_PERFCTR0;
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return msr - MSR_P4_BPU_PERFCTR0;

	/* unknown vendor: everything maps to bit 0 */
	return 0;
}
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/*
	 * Map an event-select MSR address to its bit offset in the
	 * per-cpu evntsel_nmi_owner reservation mask.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return msr - MSR_K7_EVNTSEL0;
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return msr - MSR_P4_BSU_ESCR0;

	/* unknown vendor: everything maps to bit 0 */
	return 0;
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	/*
	 * 'counter' is a raw bit index into the per-cpu ownership mask,
	 * not an MSR address.  Callers (e.g. oprofile) pass the offset
	 * directly.
	 */
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/* non-zero when the bit is still clear, i.e. the counter is free */
	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	/* translate the MSR address into its reservation bit offset */
	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/*
	 * Only a peek, no reservation is taken; a racing reserve can still
	 * win between this check and a later reserve_perfctr_nmi() call.
	 */
	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}
int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter = nmi_perfctr_msr_to_bit(msr);

	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/*
	 * test_and_set_bit() returns the previous bit value: 0 means the
	 * counter was free and is now ours, so report success (1).
	 */
	return !test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}
void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	/* translate the MSR address into its reservation bit offset */
	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/* drop ownership; no check that the caller actually held the bit */
	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}
int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter = nmi_evntsel_msr_to_bit(msr);

	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/*
	 * test_and_set_bit() returns the previous bit value: 0 means the
	 * selector was free and is now ours, so report success (1).
	 */
	return !test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}
void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	/* translate the MSR address into its reservation bit offset */
	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	/* drop ownership; no check that the caller actually held the bit */
	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}
static __cpuinit inline int nmi_known_cpu(void) static __cpuinit inline int nmi_known_cpu(void)
{ {
switch (boot_cpu_data.x86_vendor) { switch (boot_cpu_data.x86_vendor) {
...@@ -325,34 +428,22 @@ late_initcall(init_lapic_nmi_sysfs); ...@@ -325,34 +428,22 @@ late_initcall(init_lapic_nmi_sysfs);
#endif /* CONFIG_PM */ #endif /* CONFIG_PM */
/* static int setup_k7_watchdog(void)
* Activate the NMI watchdog via the local APIC.
* Original code written by Keith Owens.
*/
/* Zero 'n' consecutive MSRs starting at address 'base'. */
static void clear_msr_range(unsigned int base, unsigned int n)
{
	unsigned int i;

	for(i = 0; i < n; ++i)
		wrmsr(base+i, 0, 0);
}
static void setup_k7_watchdog(void)
{ {
int i;
unsigned int evntsel; unsigned int evntsel;
nmi_perfctr_msr = MSR_K7_PERFCTR0; nmi_perfctr_msr = MSR_K7_PERFCTR0;
for(i = 0; i < 4; ++i) { if (!reserve_perfctr_nmi(nmi_perfctr_msr))
goto fail;
if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
goto fail1;
/* Simulator may not support it */ /* Simulator may not support it */
if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL)) { if (checking_wrmsrl(MSR_K7_EVNTSEL0, 0UL))
nmi_perfctr_msr = 0; goto fail2;
return; wrmsrl(MSR_K7_PERFCTR0, 0UL);
}
wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
}
evntsel = K7_EVNTSEL_INT evntsel = K7_EVNTSEL_INT
| K7_EVNTSEL_OS | K7_EVNTSEL_OS
...@@ -364,6 +455,13 @@ static void setup_k7_watchdog(void) ...@@ -364,6 +455,13 @@ static void setup_k7_watchdog(void)
apic_write(APIC_LVTPC, APIC_DM_NMI); apic_write(APIC_LVTPC, APIC_DM_NMI);
evntsel |= K7_EVNTSEL_ENABLE; evntsel |= K7_EVNTSEL_ENABLE;
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
return 1;
fail2:
release_evntsel_nmi(MSR_K7_EVNTSEL0);
fail1:
release_perfctr_nmi(nmi_perfctr_msr);
fail:
return 0;
} }
...@@ -382,22 +480,11 @@ static int setup_p4_watchdog(void) ...@@ -382,22 +480,11 @@ static int setup_p4_watchdog(void)
nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1; nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
#endif #endif
if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL)) if (!reserve_perfctr_nmi(nmi_perfctr_msr))
clear_msr_range(0x3F1, 2); goto fail;
/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
docs doesn't fully define it, so leave it alone for now. */ if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
if (boot_cpu_data.x86_model >= 0x3) { goto fail1;
/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
clear_msr_range(0x3A0, 26);
clear_msr_range(0x3BC, 3);
} else {
clear_msr_range(0x3A0, 31);
}
clear_msr_range(0x3C0, 6);
clear_msr_range(0x3C8, 6);
clear_msr_range(0x3E0, 2);
clear_msr_range(MSR_P4_CCCR0, 18);
clear_msr_range(MSR_P4_PERFCTR0, 18);
wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0); wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0); wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
...@@ -406,6 +493,10 @@ static int setup_p4_watchdog(void) ...@@ -406,6 +493,10 @@ static int setup_p4_watchdog(void)
apic_write(APIC_LVTPC, APIC_DM_NMI); apic_write(APIC_LVTPC, APIC_DM_NMI);
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
return 1; return 1;
fail1:
release_perfctr_nmi(nmi_perfctr_msr);
fail:
return 0;
} }
void setup_apic_nmi_watchdog(void) void setup_apic_nmi_watchdog(void)
...@@ -416,7 +507,8 @@ void setup_apic_nmi_watchdog(void) ...@@ -416,7 +507,8 @@ void setup_apic_nmi_watchdog(void)
return; return;
if (strstr(boot_cpu_data.x86_model_id, "Screwdriver")) if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
return; return;
setup_k7_watchdog(); if (!setup_k7_watchdog())
return;
break; break;
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
if (boot_cpu_data.x86 != 15) if (boot_cpu_data.x86 != 15)
...@@ -588,6 +680,12 @@ int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file ...@@ -588,6 +680,12 @@ int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file
EXPORT_SYMBOL(nmi_active); EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog); EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(reserve_lapic_nmi); EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi); EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog); EXPORT_SYMBOL(disable_timer_nmi_watchdog);
......
...@@ -25,6 +25,13 @@ void set_nmi_callback(nmi_callback_t callback); ...@@ -25,6 +25,13 @@ void set_nmi_callback(nmi_callback_t callback);
*/ */
void unset_nmi_callback(void); void unset_nmi_callback(void);
extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int avail_to_resrv_perfctr_nmi(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
extern int reserve_evntsel_nmi(unsigned int);
extern void release_evntsel_nmi(unsigned int);
extern void setup_apic_nmi_watchdog (void); extern void setup_apic_nmi_watchdog (void);
extern int reserve_lapic_nmi(void); extern int reserve_lapic_nmi(void);
extern void release_lapic_nmi(void); extern void release_lapic_nmi(void);
......
...@@ -56,6 +56,12 @@ extern int panic_on_timeout; ...@@ -56,6 +56,12 @@ extern int panic_on_timeout;
extern int unknown_nmi_panic; extern int unknown_nmi_panic;
extern int check_nmi_watchdog(void); extern int check_nmi_watchdog(void);
extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int avail_to_resrv_perfctr_nmi(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
extern int reserve_evntsel_nmi(unsigned int);
extern void release_evntsel_nmi(unsigned int);
extern void setup_apic_nmi_watchdog (void); extern void setup_apic_nmi_watchdog (void);
extern int reserve_lapic_nmi(void); extern int reserve_lapic_nmi(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment