Commit 1dd498e5 authored by James Morse, committed by Marc Zyngier

KVM: arm64: Workaround Cortex-A510's single-step and PAC trap errata

Cortex-A510's erratum #2077057 causes SPSR_EL2 to be corrupted when
single-stepping authenticated ERET instructions. A single step is
expected, but a pointer authentication trap is taken instead. The
erratum causes SPSR_EL1 to be copied to SPSR_EL2, which could allow
EL1 to cause a return to EL2 with a guest controlled ELR_EL2.

Because the conditions require an ERET into active-not-pending state,
this is only a problem for EL2 when EL2 is stepping EL1. In this case
the previous SPSR_EL2 value is preserved in struct kvm_vcpu, and can be
restored.

Cc: stable@vger.kernel.org # 53960faf: arm64: Add Cortex-A510 CPU part definition
Cc: stable@vger.kernel.org
Signed-off-by: James Morse <james.morse@arm.com>
[maz: fixup cpucaps ordering]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220127122052.1584324-5-james.morse@arm.com
parent 1229630a
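
The corrupting sequence only matters when EL2 is single-stepping EL1, which for KVM means userspace has enabled single-step debugging for the vCPU. As a rough, hypothetical sketch of how a VMM puts a vCPU into that state through the standard KVM_SET_GUEST_DEBUG ioctl (vcpu_fd is assumed to be an already-created KVM vCPU descriptor; error handling is omitted):

/* Hypothetical illustration: ask KVM to single-step a vCPU. */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_guest_single_step(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	/* KVM_GUESTDBG_SINGLESTEP is the knob that makes EL2 step the EL1 guest. */
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
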
@@ -100,6 +100,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2051678        | ARM64_ERRATUM_2051678       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2077057        | ARM64_ERRATUM_2077057       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
...
@@ -680,6 +680,22 @@ config ARM64_ERRATUM_2051678
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_2077057
+	bool "Cortex-A510: 2077057: workaround software-step corrupting SPSR_EL2"
+	help
+	  This option adds the workaround for ARM Cortex-A510 erratum 2077057.
+	  Affected Cortex-A510 may corrupt SPSR_EL2 when a step exception is
+	  expected, but a Pointer Authentication trap is taken instead. The
+	  erratum causes SPSR_EL1 to be copied to SPSR_EL2, which could allow
+	  EL1 to cause a return to EL2 with a guest controlled ELR_EL2.
+
+	  This can only happen when EL2 is stepping EL1.
+
+	  When these conditions occur, the SPSR_EL2 value is unchanged from the
+	  previous guest entry, and can be restored from the in-memory copy.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_2119858
 	bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
 	default y
...
@@ -600,6 +600,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2077057
+	{
+		.desc = "ARM erratum 2077057",
+		.capability = ARM64_WORKAROUND_2077057,
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_2064142
 	{
 		.desc = "ARM erratum 2064142",
...
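
The ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2) entry above limits the workaround to Cortex-A510 r0p0 through r0p2. A self-contained sketch of what that MIDR match amounts to; the field macros and helper below are redefined locally for illustration and are not the kernel's implementation:

#include <stdbool.h>
#include <stdint.h>

/* MIDR_EL1 field accessors, redefined here for the example. */
#define MIDR_IMPLEMENTER(m)	(((m) >> 24) & 0xff)
#define MIDR_VARIANT(m)		(((m) >> 20) & 0xf)
#define MIDR_PARTNUM(m)		(((m) >> 4) & 0xfff)
#define MIDR_REVISION(m)	((m) & 0xf)

#define ARM_IMPLEMENTER_ARM	0x41	/* 'A' */
#define PARTNUM_CORTEX_A510	0xd46

/* True for Cortex-A510 r0p0..r0p2, the range the errata entry covers. */
static bool midr_is_affected_a510(uint32_t midr)
{
	return MIDR_IMPLEMENTER(midr) == ARM_IMPLEMENTER_ARM &&
	       MIDR_PARTNUM(midr) == PARTNUM_CORTEX_A510 &&
	       MIDR_VARIANT(midr) == 0 &&	/* r0 */
	       MIDR_REVISION(midr) <= 2;	/* p0..p2 */
}
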
@@ -402,6 +402,24 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	return false;
 }
 
+static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	/*
+	 * Check for the conditions of Cortex-A510's #2077057. When these occur
+	 * SPSR_EL2 can't be trusted, but isn't needed either as it is
+	 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
+	 * Are we single-stepping the guest, and took a PAC exception from the
+	 * active-not-pending state?
+	 */
+	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
+	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
+	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
+	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
+		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
+
+	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+}
+
 /*
  * Return true when we were able to fixup the guest exit and should return to
  * the guest, false when we should restore the host state and return to the
@@ -413,7 +431,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	 * Save PSTATE early so that we can evaluate the vcpu mode
 	 * early on.
 	 */
-	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+	synchronize_vcpu_pstate(vcpu, exit_code);
 
 	/*
 	 * Check whether we want to repaint the state one way or
...
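
To unpack the new synchronize_vcpu_pstate() check: KVM_GUESTDBG_SINGLESTEP means userspace asked to step the guest, the saved SPSR's SS bit (bit 21) means the step was taken from the active-not-pending state, and an ESR_EL2 exception class of 0x09 is a trapped pointer-authentication instruction. A standalone sketch of that predicate, with constants and names local to the example rather than taken from the kernel headers:

#include <stdbool.h>
#include <stdint.h>

#define ESR_EC_SHIFT	26		/* ESR_EL2[31:26]: exception class */
#define ESR_EC_MASK	0x3fu
#define ESR_EC_PAC	0x09u		/* trapped pointer-authentication instruction */
#define SPSR_SS		(1u << 21)	/* software-step bit in SPSR/PSTATE */

/*
 * True when a PAC trap was delivered while a single step was expected,
 * i.e. the conditions under which erratum #2077057 corrupts SPSR_EL2.
 */
static bool erratum_2077057_conditions(uint64_t esr_el2, uint64_t saved_spsr,
				       bool single_step_requested)
{
	uint32_t ec = (esr_el2 >> ESR_EC_SHIFT) & ESR_EC_MASK;

	return single_step_requested &&
	       (saved_spsr & SPSR_SS) &&
	       ec == ESR_EC_PAC;
}
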
@@ -55,9 +55,10 @@ WORKAROUND_1418040
 WORKAROUND_1463225
 WORKAROUND_1508412
 WORKAROUND_1542419
-WORKAROUND_2064142
-WORKAROUND_2038923
 WORKAROUND_1902691
+WORKAROUND_2038923
+WORKAROUND_2064142
+WORKAROUND_2077057
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
 WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
...