Commit c4792b6d authored by Will Deacon, committed by Marc Zyngier

arm64: spectre: Rename ARM64_HARDEN_EL2_VECTORS to ARM64_SPECTRE_V3A

Since ARM64_HARDEN_EL2_VECTORS is really a mitigation for Spectre-v3a,
rename it accordingly, for consistency with the v2 and v4 mitigations.
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20201113113847.21619-9-will@kernel.org
parent b881cdce
@@ -100,7 +100,7 @@ hypervisor maps kernel pages in EL2 at a fixed (and potentially
 random) offset from the linear mapping. See the kern_hyp_va macro and
 kvm_update_va_mask function for more details. MMIO devices such as
 GICv2 gets mapped next to the HYP idmap page, as do vectors when
-ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
+ARM64_SPECTRE_V3A is enabled for particular CPUs.
 
 When using KVM with the Virtualization Host Extensions, no additional
 mappings are created, since the host kernel runs directly in EL2.
......
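Note for readers: the randomised EL2 offset mentioned above is applied by kern_hyp_va(). A minimal conceptual sketch in C follows; the mask/tag parameters and the example_ helper name are illustrative assumptions, since the real macro is an alternative-patched sequence set up at boot by kvm_update_va_mask().

/*
 * Conceptual sketch only: a kernel linear-map address becomes a HYP VA by
 * clearing the top bits and OR-ing in a (potentially random) tag. The real
 * kern_hyp_va() is a patched macro, not a plain helper like this.
 */
#include <stdint.h>

static inline uint64_t example_kern_hyp_va(uint64_t kern_va,
					    uint64_t hyp_va_mask,
					    uint64_t hyp_va_tag)
{
	return (kern_va & hyp_va_mask) | hyp_va_tag;
}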
@@ -21,7 +21,7 @@
 #define ARM64_HAS_VIRT_HOST_EXTN	11
 #define ARM64_WORKAROUND_CAVIUM_27456	12
 #define ARM64_HAS_32BIT_EL0		13
-#define ARM64_HARDEN_EL2_VECTORS	14
+#define ARM64_SPECTRE_V3A		14
 #define ARM64_HAS_CNP			15
 #define ARM64_HAS_NO_FPSIMD		16
 #define ARM64_WORKAROUND_REPEAT_TLBI	17
......
@@ -83,7 +83,7 @@ enum mitigation_state arm64_get_spectre_v2_state(void);
 bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
 void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 
-void cpu_el2_vector_harden_enable(const struct arm64_cpu_capabilities *__unused);
+void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 
 enum mitigation_state arm64_get_spectre_v4_state(void);
 bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
......
@@ -460,10 +460,10 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_RANDOMIZE_BASE
 	{
 		/* Must come after the Spectre-v2 entry */
-		.desc = "EL2 vector hardening",
-		.capability = ARM64_HARDEN_EL2_VECTORS,
+		.desc = "Spectre-v3a",
+		.capability = ARM64_SPECTRE_V3A,
 		ERRATA_MIDR_RANGE_LIST(ca57_a72),
-		.cpu_enable = cpu_el2_vector_harden_enable,
+		.cpu_enable = spectre_v3a_enable_mitigation,
 	},
 #endif
 	{
......
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
+ * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
  * detailed at:
  *
  * https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
@@ -270,11 +270,18 @@ void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
 	update_mitigation_state(&spectre_v2_state, state);
 }
 
-void cpu_el2_vector_harden_enable(const struct arm64_cpu_capabilities *__unused)
+/*
+ * Spectre-v3a.
+ *
+ * Phew, there's not an awful lot to do here! We just instruct EL2 to use
+ * an indirect trampoline for the hyp vectors so that guests can't read
+ * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
+ */
+void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
 {
 	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
 
-	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS))
+	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
 		data->slot += HYP_VECTOR_INDIRECT;
 }
......
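Note for readers: the "data->slot += HYP_VECTOR_INDIRECT" above composes with the Spectre-v2 slot selection. A hedged C sketch of the combined choice follows; the example_-prefixed enum and helper are illustrative assumptions, and only the additive slot relationship is taken from the hunk above.

/*
 * Hedged sketch: Spectre-v2 selects a "SPECTRE" variant of the vector slot,
 * and Spectre-v3a bumps it to the matching "INDIRECT" variant, mirroring
 * "data->slot += HYP_VECTOR_INDIRECT". Names are illustrative, not the
 * kernel's.
 */
enum example_hyp_vector_slot {
	EXAMPLE_HYP_VECTOR_DIRECT,		/* no mitigation              */
	EXAMPLE_HYP_VECTOR_SPECTRE_DIRECT,	/* Spectre-v2 only            */
	EXAMPLE_HYP_VECTOR_INDIRECT,		/* Spectre-v3a only           */
	EXAMPLE_HYP_VECTOR_SPECTRE_INDIRECT,	/* Spectre-v2 and Spectre-v3a */
};

static int example_pick_hyp_vector_slot(int spectre_v2, int spectre_v3a)
{
	int slot = spectre_v2 ? EXAMPLE_HYP_VECTOR_SPECTRE_DIRECT
			      : EXAMPLE_HYP_VECTOR_DIRECT;

	if (spectre_v3a)
		slot += EXAMPLE_HYP_VECTOR_INDIRECT;

	return slot;
}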
@@ -1314,7 +1314,7 @@ static int kvm_init_vector_slots(void)
 	base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 	kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
 
-	if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS))
+	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
 		return 0;
 
 	if (!has_vhe()) {
@@ -1388,15 +1388,15 @@ static void cpu_hyp_reset(void)
  * placed in one of the vector slots, which is executed before jumping
  * to the real vectors.
  *
- * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
+ * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
  *   containing the hardening sequence is mapped next to the idmap page,
  *   and executed before jumping to the real vectors.
  *
- * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
+ * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
  *   empty slot is selected, mapped next to the idmap page, and
  *   executed before jumping to the real vectors.
  *
- * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
+ * Note that ARM64_SPECTRE_V3A is somewhat incompatible with
  * VHE, as we don't have hypervisor-specific mappings. If the system
  * is VHE and yet selects this capability, it will be ignored.
  */
......
@@ -209,8 +209,7 @@ SYM_CODE_END(__kvm_hyp_vector)
 	.if \indirect != 0
 	alternative_cb	kvm_patch_vector_branch
 	/*
-	 * For ARM64_HARDEN_EL2_VECTORS configurations, these NOPs get replaced
-	 * with:
+	 * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
 	 *
 	 * movz	x0, #(addr & 0xffff)
 	 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
......
@@ -139,10 +139,8 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 	BUG_ON(nr_inst != 4);
 
-	if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) ||
-	    WARN_ON_ONCE(has_vhe())) {
+	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
 		return;
-	}
 
 	/*
 	 * Compute HYP VA by using the same computation as kern_hyp_va()
......
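Note for readers: per BUG_ON(nr_inst != 4) above, the patched sequence is four instructions, and the hyp-entry.S comment shows the first two carrying 16-bit chunks of the target address. A hedged C sketch of that immediate splitting follows; the struct and helper names are illustrative, and the assumption that the remaining instructions are a third move plus a register branch is inferred, not quoted from the source.

/*
 * Hedged sketch: split a target address into the 16-bit immediates that a
 * movz/movk-style sequence would carry, as suggested by the comment in the
 * hyp-entry.S hunk. Purely illustrative; the kernel emits the real opcodes
 * with its instruction-generation helpers.
 */
#include <stdint.h>

struct example_movz_movk_imms {
	uint16_t imm0;	/* movz x0, #imm0            -> bits  0..15 */
	uint16_t imm16;	/* movk x0, #imm16, lsl #16  -> bits 16..31 */
	uint16_t imm32;	/* movk x0, #imm32, lsl #32  -> bits 32..47 (assumed) */
};

static struct example_movz_movk_imms example_split_addr(uint64_t addr)
{
	struct example_movz_movk_imms imms = {
		.imm0  = (uint16_t)(addr & 0xffff),
		.imm16 = (uint16_t)((addr >> 16) & 0xffff),
		.imm32 = (uint16_t)((addr >> 32) & 0xffff),
	};

	return imms;
}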