Commit c4698981 authored by Lorenzo Pieralisi, committed by Kamal Mostafa

arm64: kernel: fix architected PMU registers unconditional access

commit f436b2ac upstream.

The Performance Monitors extension is an optional feature of the
AArch64 architecture; therefore, in order to access Performance
Monitors registers safely, the kernel should detect the presence of
the architected PMU through the ID_AA64DFR0_EL1 register PMUVer field
before accessing them.

This patch implements a guard by reading the ID_AA64DFR0_EL1 register
PMUVer field to detect the presence of the architected PMU, and prevents
access to PMU system registers if the Performance Monitors extension is
not implemented in the core.

Cc: Peter Maydell <peter.maydell@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Fixes: 60792ad3 ("arm64: kernel: enforce pmuserenr_el0 initialization and restore")
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Reported-by: Guenter Roeck <linux@roeck-us.net>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
parent 38c15807
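
For illustration, a minimal C sketch of the guard follows; the patch itself
implements the check in assembly (see the diffs below), and the
read_id_aa64dfr0_el1() accessor here is a hypothetical stand-in for
"mrs xN, id_aa64dfr0_el1":

/*
 * Minimal C sketch of the PMUVer guard, for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical accessor standing in for "mrs xN, id_aa64dfr0_el1". */
extern uint64_t read_id_aa64dfr0_el1(void);

static bool architected_pmu_present(void)
{
	/* PMUVer occupies bits [11:8] of ID_AA64DFR0_EL1. */
	int pmuver = (int)((read_id_aa64dfr0_el1() >> 8) & 0xf);

	/*
	 * Sign-extend the 4-bit field, as the sbfx instruction in the
	 * patch does: 0x0 means no PMU, and 0xf (IMPLEMENTATION DEFINED)
	 * becomes -1, so both fail the ">= 1" test below and the PMU
	 * registers are left untouched.
	 */
	if (pmuver >= 8)
		pmuver -= 16;

	return pmuver >= 1;
}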
@@ -529,9 +529,14 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
 #endif

 	/* EL2 debug */
+	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	x0, x0, #8, #4
+	cmp	x0, #1
+	b.lt	4f				// Skip if no PMU present
 	mrs	x0, pmcr_el0			// Disable debug access traps
 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
 	msr	mdcr_el2, x0			// all PMU counters from EL1
+4:

 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr
@@ -62,3 +62,15 @@
 	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
 #endif
 	.endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+	.macro	reset_pmuserenr_el0, tmpreg
+	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	\tmpreg, \tmpreg, #8, #4
+	cmp	\tmpreg, #1			// Skip if no PMU present
+	b.lt	9000f
+	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+9000:
+	.endm
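
Factoring the check into the reset_pmuserenr_el0 macro lets both call sites
below share it: the \tmpreg argument leaves the choice of scratch register to
the caller, and the numeric 9000 label keeps the skip branch local to the
macro body.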
@@ -119,7 +119,7 @@ ENTRY(cpu_do_resume)
 	 */
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	mov	x0, x12
 	dsb	nsh				// Make sure local tlb invalidation completed
 	isb
@@ -159,7 +159,7 @@ ENTRY(__cpu_setup)
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
 	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
 	msr	mdscr_el1, x0			// access to the DCC from EL0
-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *