Commit 004417a6 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf, arch: Cleanup perf-pmu init vs lockup-detector

The perf hardware pmu got initialized at various points in the boot,
some before early_initcall() some after (notably arch_initcall).

The problem is that the NMI lockup detector is run from early_initcall()
and expects the hardware pmu to be present.

Sanitize this by moving all architecture hardware pmu implementations to
initialize at early_initcall() and move the lockup detector to an explicit
initcall right after that.

Cc: paulus <paulus@samba.org>
Cc: davem <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Acked-by: default avatarPaul Mundt <lethal@linux-sh.org>
Acked-by: default avatarWill Deacon <will.deacon@arm.com>
Signed-off-by: default avatarPeter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1290707759.2145.119.camel@laptop>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parent 5ef428c4
#ifndef __ASM_ALPHA_PERF_EVENT_H #ifndef __ASM_ALPHA_PERF_EVENT_H
#define __ASM_ALPHA_PERF_EVENT_H #define __ASM_ALPHA_PERF_EVENT_H
#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
#else
static inline void init_hw_perf_events(void) { }
#endif
#endif /* __ASM_ALPHA_PERF_EVENT_H */ #endif /* __ASM_ALPHA_PERF_EVENT_H */
...@@ -112,8 +112,6 @@ init_IRQ(void) ...@@ -112,8 +112,6 @@ init_IRQ(void)
wrent(entInt, 0); wrent(entInt, 0);
alpha_mv.init_irq(); alpha_mv.init_irq();
init_hw_perf_events();
} }
/* /*
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/kdebug.h> #include <linux/kdebug.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/init.h>
#include <asm/hwrpb.h> #include <asm/hwrpb.h>
#include <asm/atomic.h> #include <asm/atomic.h>
...@@ -863,13 +864,13 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr, ...@@ -863,13 +864,13 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
/* /*
* Init call to initialise performance events at kernel startup. * Init call to initialise performance events at kernel startup.
*/ */
void __init init_hw_perf_events(void) int __init init_hw_perf_events(void)
{ {
pr_info("Performance events: "); pr_info("Performance events: ");
if (!supported_cpu()) { if (!supported_cpu()) {
pr_cont("No support for your CPU.\n"); pr_cont("No support for your CPU.\n");
return; return 0;
} }
pr_cont("Supported CPU type!\n"); pr_cont("Supported CPU type!\n");
...@@ -882,5 +883,7 @@ void __init init_hw_perf_events(void) ...@@ -882,5 +883,7 @@ void __init init_hw_perf_events(void)
alpha_pmu = &ev67_pmu; alpha_pmu = &ev67_pmu;
perf_pmu_register(&pmu); perf_pmu_register(&pmu);
}
return 0;
}
early_initcall(init_hw_perf_events);
...@@ -3038,7 +3038,7 @@ init_hw_perf_events(void) ...@@ -3038,7 +3038,7 @@ init_hw_perf_events(void)
return 0; return 0;
} }
arch_initcall(init_hw_perf_events); early_initcall(init_hw_perf_events);
/* /*
* Callchain handling code. * Callchain handling code.
......
...@@ -1047,6 +1047,6 @@ init_hw_perf_events(void) ...@@ -1047,6 +1047,6 @@ init_hw_perf_events(void)
return 0; return 0;
} }
arch_initcall(init_hw_perf_events); early_initcall(init_hw_perf_events);
#endif /* defined(CONFIG_CPU_MIPS32)... */ #endif /* defined(CONFIG_CPU_MIPS32)... */
...@@ -126,4 +126,4 @@ static int init_e500_pmu(void) ...@@ -126,4 +126,4 @@ static int init_e500_pmu(void)
return register_fsl_emb_pmu(&e500_pmu); return register_fsl_emb_pmu(&e500_pmu);
} }
arch_initcall(init_e500_pmu); early_initcall(init_e500_pmu);
...@@ -414,4 +414,4 @@ static int init_mpc7450_pmu(void) ...@@ -414,4 +414,4 @@ static int init_mpc7450_pmu(void)
return register_power_pmu(&mpc7450_pmu); return register_power_pmu(&mpc7450_pmu);
} }
arch_initcall(init_mpc7450_pmu); early_initcall(init_mpc7450_pmu);
...@@ -613,4 +613,4 @@ static int init_power4_pmu(void) ...@@ -613,4 +613,4 @@ static int init_power4_pmu(void)
return register_power_pmu(&power4_pmu); return register_power_pmu(&power4_pmu);
} }
arch_initcall(init_power4_pmu); early_initcall(init_power4_pmu);
...@@ -682,4 +682,4 @@ static int init_power5p_pmu(void) ...@@ -682,4 +682,4 @@ static int init_power5p_pmu(void)
return register_power_pmu(&power5p_pmu); return register_power_pmu(&power5p_pmu);
} }
arch_initcall(init_power5p_pmu); early_initcall(init_power5p_pmu);
...@@ -621,4 +621,4 @@ static int init_power5_pmu(void) ...@@ -621,4 +621,4 @@ static int init_power5_pmu(void)
return register_power_pmu(&power5_pmu); return register_power_pmu(&power5_pmu);
} }
arch_initcall(init_power5_pmu); early_initcall(init_power5_pmu);
...@@ -544,4 +544,4 @@ static int init_power6_pmu(void) ...@@ -544,4 +544,4 @@ static int init_power6_pmu(void)
return register_power_pmu(&power6_pmu); return register_power_pmu(&power6_pmu);
} }
arch_initcall(init_power6_pmu); early_initcall(init_power6_pmu);
...@@ -369,4 +369,4 @@ static int init_power7_pmu(void) ...@@ -369,4 +369,4 @@ static int init_power7_pmu(void)
return register_power_pmu(&power7_pmu); return register_power_pmu(&power7_pmu);
} }
arch_initcall(init_power7_pmu); early_initcall(init_power7_pmu);
...@@ -494,4 +494,4 @@ static int init_ppc970_pmu(void) ...@@ -494,4 +494,4 @@ static int init_ppc970_pmu(void)
return register_power_pmu(&ppc970_pmu); return register_power_pmu(&ppc970_pmu);
} }
arch_initcall(init_ppc970_pmu); early_initcall(init_ppc970_pmu);
...@@ -250,4 +250,4 @@ static int __init sh7750_pmu_init(void) ...@@ -250,4 +250,4 @@ static int __init sh7750_pmu_init(void)
return register_sh_pmu(&sh7750_pmu); return register_sh_pmu(&sh7750_pmu);
} }
arch_initcall(sh7750_pmu_init); early_initcall(sh7750_pmu_init);
...@@ -284,4 +284,4 @@ static int __init sh4a_pmu_init(void) ...@@ -284,4 +284,4 @@ static int __init sh4a_pmu_init(void)
return register_sh_pmu(&sh4a_pmu); return register_sh_pmu(&sh4a_pmu);
} }
arch_initcall(sh4a_pmu_init); early_initcall(sh4a_pmu_init);
...@@ -4,8 +4,6 @@ ...@@ -4,8 +4,6 @@
#ifdef CONFIG_PERF_EVENTS #ifdef CONFIG_PERF_EVENTS
#include <asm/ptrace.h> #include <asm/ptrace.h>
extern void init_hw_perf_events(void);
#define perf_arch_fetch_caller_regs(regs, ip) \ #define perf_arch_fetch_caller_regs(regs, ip) \
do { \ do { \
unsigned long _pstate, _asi, _pil, _i7, _fp; \ unsigned long _pstate, _asi, _pil, _i7, _fp; \
...@@ -26,8 +24,6 @@ do { \ ...@@ -26,8 +24,6 @@ do { \
(regs)->u_regs[UREG_I6] = _fp; \ (regs)->u_regs[UREG_I6] = _fp; \
(regs)->u_regs[UREG_I7] = _i7; \ (regs)->u_regs[UREG_I7] = _i7; \
} while (0) } while (0)
#else
static inline void init_hw_perf_events(void) { }
#endif #endif
#endif #endif
...@@ -270,8 +270,6 @@ int __init nmi_init(void) ...@@ -270,8 +270,6 @@ int __init nmi_init(void)
atomic_set(&nmi_active, -1); atomic_set(&nmi_active, -1);
} }
} }
if (!err)
init_hw_perf_events();
return err; return err;
} }
......
...@@ -1307,20 +1307,23 @@ static bool __init supported_pmu(void) ...@@ -1307,20 +1307,23 @@ static bool __init supported_pmu(void)
return false; return false;
} }
void __init init_hw_perf_events(void) int __init init_hw_perf_events(void)
{ {
pr_info("Performance events: "); pr_info("Performance events: ");
if (!supported_pmu()) { if (!supported_pmu()) {
pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
return; return 0;
} }
pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
perf_pmu_register(&pmu); perf_pmu_register(&pmu);
register_die_notifier(&perf_event_nmi_notifier); register_die_notifier(&perf_event_nmi_notifier);
return 0;
} }
early_initcall(init_hw_perf_events);
void perf_callchain_kernel(struct perf_callchain_entry *entry, void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs) struct pt_regs *regs)
......
...@@ -125,7 +125,6 @@ union cpuid10_edx { ...@@ -125,7 +125,6 @@ union cpuid10_edx {
#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */ #define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
#ifdef CONFIG_PERF_EVENTS #ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
extern void perf_events_lapic_init(void); extern void perf_events_lapic_init(void);
#define PERF_EVENT_INDEX_OFFSET 0 #define PERF_EVENT_INDEX_OFFSET 0
...@@ -156,7 +155,6 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); ...@@ -156,7 +155,6 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
} }
#else #else
static inline void init_hw_perf_events(void) { }
static inline void perf_events_lapic_init(void) { } static inline void perf_events_lapic_init(void) { }
#endif #endif
......
...@@ -894,7 +894,6 @@ void __init identify_boot_cpu(void) ...@@ -894,7 +894,6 @@ void __init identify_boot_cpu(void)
#else #else
vgetcpu_set_mode(); vgetcpu_set_mode();
#endif #endif
init_hw_perf_events();
} }
void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
......
...@@ -1353,7 +1353,7 @@ static void __init pmu_check_apic(void) ...@@ -1353,7 +1353,7 @@ static void __init pmu_check_apic(void)
pr_info("no hardware sampling interrupt available.\n"); pr_info("no hardware sampling interrupt available.\n");
} }
void __init init_hw_perf_events(void) int __init init_hw_perf_events(void)
{ {
struct event_constraint *c; struct event_constraint *c;
int err; int err;
...@@ -1368,11 +1368,11 @@ void __init init_hw_perf_events(void) ...@@ -1368,11 +1368,11 @@ void __init init_hw_perf_events(void)
err = amd_pmu_init(); err = amd_pmu_init();
break; break;
default: default:
return; return 0;
} }
if (err != 0) { if (err != 0) {
pr_cont("no PMU driver, software events only.\n"); pr_cont("no PMU driver, software events only.\n");
return; return 0;
} }
pmu_check_apic(); pmu_check_apic();
...@@ -1380,7 +1380,7 @@ void __init init_hw_perf_events(void) ...@@ -1380,7 +1380,7 @@ void __init init_hw_perf_events(void)
/* sanity check that the hardware exists or is emulated */ /* sanity check that the hardware exists or is emulated */
if (!check_hw_exists()) { if (!check_hw_exists()) {
pr_cont("Broken PMU hardware detected, software events only.\n"); pr_cont("Broken PMU hardware detected, software events only.\n");
return; return 0;
} }
pr_cont("%s PMU driver.\n", x86_pmu.name); pr_cont("%s PMU driver.\n", x86_pmu.name);
...@@ -1431,7 +1431,10 @@ void __init init_hw_perf_events(void) ...@@ -1431,7 +1431,10 @@ void __init init_hw_perf_events(void)
perf_pmu_register(&pmu); perf_pmu_register(&pmu);
perf_cpu_notifier(x86_pmu_notifier); perf_cpu_notifier(x86_pmu_notifier);
return 0;
} }
early_initcall(init_hw_perf_events);
static inline void x86_pmu_read(struct perf_event *event) static inline void x86_pmu_read(struct perf_event *event)
{ {
......
...@@ -316,6 +316,7 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, ...@@ -316,6 +316,7 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
size_t *lenp, loff_t *ppos); size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic; extern unsigned int softlockup_panic;
extern int softlockup_thresh; extern int softlockup_thresh;
void lockup_detector_init(void);
#else #else
static inline void touch_softlockup_watchdog(void) static inline void touch_softlockup_watchdog(void)
{ {
...@@ -326,6 +327,9 @@ static inline void touch_softlockup_watchdog_sync(void) ...@@ -326,6 +327,9 @@ static inline void touch_softlockup_watchdog_sync(void)
static inline void touch_all_softlockup_watchdogs(void) static inline void touch_all_softlockup_watchdogs(void)
{ {
} }
static inline void lockup_detector_init(void)
{
}
#endif #endif
#ifdef CONFIG_DETECT_HUNG_TASK #ifdef CONFIG_DETECT_HUNG_TASK
......
...@@ -882,6 +882,7 @@ static int __init kernel_init(void * unused) ...@@ -882,6 +882,7 @@ static int __init kernel_init(void * unused)
smp_prepare_cpus(setup_max_cpus); smp_prepare_cpus(setup_max_cpus);
do_pre_smp_initcalls(); do_pre_smp_initcalls();
lockup_detector_init();
smp_init(); smp_init();
sched_init_smp(); sched_init_smp();
......
...@@ -547,13 +547,13 @@ static struct notifier_block __cpuinitdata cpu_nfb = { ...@@ -547,13 +547,13 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
.notifier_call = cpu_callback .notifier_call = cpu_callback
}; };
static int __init spawn_watchdog_task(void) void __init lockup_detector_init(void)
{ {
void *cpu = (void *)(long)smp_processor_id(); void *cpu = (void *)(long)smp_processor_id();
int err; int err;
if (no_watchdog) if (no_watchdog)
return 0; return;
err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
WARN_ON(notifier_to_errno(err)); WARN_ON(notifier_to_errno(err));
...@@ -561,6 +561,5 @@ static int __init spawn_watchdog_task(void) ...@@ -561,6 +561,5 @@ static int __init spawn_watchdog_task(void)
cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
register_cpu_notifier(&cpu_nfb); register_cpu_notifier(&cpu_nfb);
return 0; return;
} }
early_initcall(spawn_watchdog_task);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment