Commit 8cf3478f authored by Michael Ellerman's avatar Michael Ellerman Committed by Greg Kroah-Hartman

powerpc/perf: Rework disable logic in pmu_disable()

commit 378a6ee9 upstream.

In pmu_disable() we disable the PMU by setting the FC (Freeze Counters)
bit in MMCR0. In order to do this we have to read/modify/write MMCR0.

It's possible that we read a value from MMCR0 which has PMAO (PMU Alert
Occurred) set. When we write that value back it will cause an interrupt
to occur. We will then end up in the PMU interrupt handler even though
we are supposed to have just disabled the PMU.

We can avoid this by making sure we never write PMAO back. We should not
lose interrupts because when the PMU is re-enabled the overflowed values
will cause another interrupt.

We also reorder the clearing of SAMPLE_ENABLE so that it is done after the
PMU is frozen. Otherwise there is a small window between the clearing of
SAMPLE_ENABLE and the setting of FC where we could take an interrupt and
incorrectly see SAMPLE_ENABLE not set. This would for example change the
logic in perf_read_regs().
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent a9514fe5
...@@ -75,6 +75,7 @@ static unsigned int freeze_events_kernel = MMCR0_FCS; ...@@ -75,6 +75,7 @@ static unsigned int freeze_events_kernel = MMCR0_FCS;
#define MMCR0_FCHV 0 #define MMCR0_FCHV 0
#define MMCR0_PMCjCE MMCR0_PMCnCE #define MMCR0_PMCjCE MMCR0_PMCnCE
#define MMCR0_PMAO 0
#define SPRN_MMCRA SPRN_MMCR2 #define SPRN_MMCRA SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE 0 #define MMCRA_SAMPLE_ENABLE 0
...@@ -852,7 +853,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) ...@@ -852,7 +853,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
static void power_pmu_disable(struct pmu *pmu) static void power_pmu_disable(struct pmu *pmu)
{ {
struct cpu_hw_events *cpuhw; struct cpu_hw_events *cpuhw;
unsigned long flags; unsigned long flags, val;
if (!ppmu) if (!ppmu)
return; return;
...@@ -860,9 +861,6 @@ static void power_pmu_disable(struct pmu *pmu) ...@@ -860,9 +861,6 @@ static void power_pmu_disable(struct pmu *pmu)
cpuhw = &__get_cpu_var(cpu_hw_events); cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) { if (!cpuhw->disabled) {
cpuhw->disabled = 1;
cpuhw->n_added = 0;
/* /*
* Check if we ever enabled the PMU on this cpu. * Check if we ever enabled the PMU on this cpu.
*/ */
...@@ -871,6 +869,21 @@ static void power_pmu_disable(struct pmu *pmu) ...@@ -871,6 +869,21 @@ static void power_pmu_disable(struct pmu *pmu)
cpuhw->pmcs_enabled = 1; cpuhw->pmcs_enabled = 1;
} }
/*
* Set the 'freeze counters' bit, clear PMAO.
*/
val = mfspr(SPRN_MMCR0);
val |= MMCR0_FC;
val &= ~MMCR0_PMAO;
/*
* The barrier is to make sure the mtspr has been
* executed and the PMU has frozen the events etc.
* before we return.
*/
write_mmcr0(cpuhw, val);
mb();
/* /*
* Disable instruction sampling if it was enabled * Disable instruction sampling if it was enabled
*/ */
...@@ -880,14 +893,8 @@ static void power_pmu_disable(struct pmu *pmu) ...@@ -880,14 +893,8 @@ static void power_pmu_disable(struct pmu *pmu)
mb(); mb();
} }
/* cpuhw->disabled = 1;
* Set the 'freeze counters' bit. cpuhw->n_added = 0;
* The barrier is to make sure the mtspr has been
* executed and the PMU has frozen the events
* before we return.
*/
write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
mb();
} }
local_irq_restore(flags); local_irq_restore(flags);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment