Commit 156b5371 authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/perf: move perf irq/nmi handling details into traps.c

This is required to allow more significant differences between
NMI-type interrupt handlers and regular asynchronous handlers.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210130130852.2952424-20-npiggin@gmail.com
parent 3a313883
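
[Editor's note] The dispatch added below relies on arch_irq_disabled_regs(), which the diff calls but does not show. As context, a sketch of the 64-bit soft-mask test as assumed here (the helper lives in arch/powerpc/include/asm/hw_irq.h; treat the exact body as an approximation, not a quote from this tree):

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	/*
	 * regs->softe snapshots the soft-mask state at interrupt entry,
	 * so a set IRQS_DISABLED bit means the PMI arrived inside a
	 * local_irq_disable() (soft-masked) region.
	 */
	return (regs->softe & IRQS_DISABLED);
}

This is the same test the removed perf_intr_is_nmi() helpers open-coded per driver; after this patch it is consulted once, in traps.c.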
arch/powerpc/kernel/traps.c
@@ -1892,11 +1892,40 @@ void vsx_unavailable_tm(struct pt_regs *regs)
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-void performance_monitor_exception(struct pt_regs *regs)
+static void performance_monitor_exception_nmi(struct pt_regs *regs)
+{
+	nmi_enter();
+
+	__this_cpu_inc(irq_stat.pmu_irqs);
+
+	perf_irq(regs);
+
+	nmi_exit();
+}
+
+static void performance_monitor_exception_async(struct pt_regs *regs)
 {
+	irq_enter();
+
 	__this_cpu_inc(irq_stat.pmu_irqs);
 
 	perf_irq(regs);
+
+	irq_exit();
+}
+
+void performance_monitor_exception(struct pt_regs *regs)
+{
+	/*
+	 * On 64-bit, if perf interrupts hit in a local_irq_disable
+	 * (soft-masked) region, we consider them as NMIs. This is required to
+	 * prevent hash faults on user addresses when reading callchains (and
+	 * looks better from an irq tracing perspective).
+	 */
+	if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
+		performance_monitor_exception_nmi(regs);
+	else
+		performance_monitor_exception_async(regs);
 }
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
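
[Editor's note] The point of the split is that the two bracketing pairs establish different kernel contexts. A minimal illustration, not part of the patch, using the stock helpers from <linux/hardirq.h>:

#include <linux/hardirq.h>

/*
 * Illustration only: nmi_enter() puts the section in NMI context
 * (in_nmi() becomes true, so code such as the hash-fault path can
 * detect it and avoid taking locks), while irq_enter()/irq_exit()
 * enter ordinary hardirq context, participate in irq-time accounting,
 * and may run pending softirqs on exit.
 */
static void context_demo(void)
{
	nmi_enter();
	WARN_ON(!in_nmi());	/* NMI context */
	nmi_exit();

	irq_enter();
	WARN_ON(!in_irq());	/* ordinary hardirq context */
	irq_exit();		/* may process pending softirqs */
}

Centralising this choice in traps.c is what lets later patches in the series treat the two paths more differently, as the commit message says.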
arch/powerpc/perf/core-book3s.c
@@ -110,10 +110,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
 {
 	regs->result = 0;
 }
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-	return 0;
-}
 
 static inline int siar_valid(struct pt_regs *regs)
 {
@@ -353,15 +349,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
 	regs->result = use_siar;
 }
 
-/*
- * If interrupts were soft-disabled when a PMU interrupt occurs, treat
- * it as an NMI.
- */
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-	return (regs->softe & IRQS_DISABLED);
-}
-
 /*
  * On processors like P7+ that have the SIAR-Valid bit, marked instructions
  * must be sampled only if the SIAR-valid bit is set.
@@ -2279,7 +2266,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 	struct perf_event *event;
 	unsigned long val[8];
 	int found, active;
-	int nmi;
 
 	if (cpuhw->n_limited)
 		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
@@ -2287,18 +2273,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 
 	perf_read_regs(regs);
 
-	/*
-	 * If perf interrupts hit in a local_irq_disable (soft-masked) region,
-	 * we consider them as NMIs. This is required to prevent hash faults on
-	 * user addresses when reading callchains. See the NMI test in
-	 * do_hash_page.
-	 */
-	nmi = perf_intr_is_nmi(regs);
-	if (nmi)
-		nmi_enter();
-	else
-		irq_enter();
-
 	/* Read all the PMCs since we'll need them a bunch of times */
 	for (i = 0; i < ppmu->n_counter; ++i)
 		val[i] = read_pmc(i + 1);
@@ -2344,8 +2318,8 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 			}
 		}
 	}
-	if (!found && !nmi && printk_ratelimit())
-		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
+	if (unlikely(!found) && !arch_irq_disabled_regs(regs))
+		printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");
 
 	/*
 	 * Reset MMCR0 to its normal value. This will set PMXE and
@@ -2355,11 +2329,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 	 * we get back out of this interrupt.
 	 */
 	write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0);
-
-	if (nmi)
-		nmi_exit();
-	else
-		irq_exit();
 }
 
 static void perf_event_interrupt(struct pt_regs *regs)
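
[Editor's note] Two details of the book3s hunks: with the nmi local gone, the unclaimed-PMI warning keys directly off the soft-mask state, and the open-coded printk_ratelimit() guard becomes printk_ratelimited() (both are stock <linux/printk.h> facilities). A sketch of the resulting warning path, with a hypothetical wrapper name; skipping it in the NMI case presumably avoids calling printk from a context where it is not safe:

#include <linux/printk.h>

/* Hypothetical condensation of the warning logic above. */
static void warn_unclaimed_pmi(struct pt_regs *regs, bool found)
{
	/* Suppressed when the PMI was taken as an NMI (soft-masked). */
	if (unlikely(!found) && !arch_irq_disabled_regs(regs))
		printk_ratelimited(KERN_WARNING
				   "Can't find PMC that caused IRQ\n");
}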
arch/powerpc/perf/core-fsl-emb.c
@@ -31,19 +31,6 @@ static atomic_t num_events;
 /* Used to avoid races in calling reserve/release_pmc_hardware */
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
-/*
- * If interrupts were soft-disabled when a PMU interrupt occurs, treat
- * it as an NMI.
- */
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-#ifdef __powerpc64__
-	return (regs->softe & IRQS_DISABLED);
-#else
-	return 0;
-#endif
-}
-
 static void perf_event_interrupt(struct pt_regs *regs);
 
 /*
@@ -659,13 +646,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
 	struct perf_event *event;
 	unsigned long val;
 	int found = 0;
-	int nmi;
-
-	nmi = perf_intr_is_nmi(regs);
-	if (nmi)
-		nmi_enter();
-	else
-		irq_enter();
 
 	for (i = 0; i < ppmu->n_counter; ++i) {
 		event = cpuhw->event[i];
@@ -690,11 +670,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
 	mtmsr(mfmsr() | MSR_PMM);
 	mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
 	isync();
-
-	if (nmi)
-		nmi_exit();
-	else
-		irq_exit();
 }
 
 void hw_perf_event_setup(int cpu)
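
[Editor's note] On 32-bit, the fsl-emb helper removed above always returned 0, so these handlers never took the NMI bracketing there. The IS_ENABLED(CONFIG_PPC64) test in the new traps.c dispatch preserves that behaviour, letting the compiler drop the NMI branch on 32-bit exactly as the old #ifdef __powerpc64__ did. A condensed restatement of the patch's decision, with a hypothetical wrapper name:

/* Hypothetical condensation of the new test in traps.c. */
static bool pmi_wants_nmi(struct pt_regs *regs)
{
	return IS_ENABLED(CONFIG_PPC64) && arch_irq_disabled_regs(regs);
}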