Commit a59a2edb authored by David Brazdil, committed by Marc Zyngier

KVM: arm64: Substitute RANDOMIZE_BASE for HARDEN_EL2_VECTORS

The HARDEN_EL2_VECTORS config maps the vectors at a fixed location on cores which
are susceptible to Spectre variant 3a (Cortex-A57, Cortex-A72), so that leaking the
value of VBAR_EL2 does not defeat hyp layout randomization.

Since this feature is only applicable when EL2 layout randomization is enabled,
unify both behind the same RANDOMIZE_BASE Kconfig option. The majority of the code
remains conditional on a capability that is only selected on the affected cores.
Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200721094445.82184-3-dbrazdil@google.com
parent 24f69c0f
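For context, the "capability selected on the affected cores" mentioned above is ARM64_HARDEN_EL2_VECTORS, visible in the cpu_errata.c hunk below. A minimal sketch (not part of this patch) of how code can stay conditional on that runtime capability rather than on a dedicated Kconfig symbol, assuming the standard arm64 cpus_have_const_cap() helper; the helper name below is purely illustrative:

    #include <asm/cpufeature.h>	/* cpus_have_const_cap() */
    #include <asm/cpucaps.h>	/* ARM64_HARDEN_EL2_VECTORS */

    /*
     * Illustrative only: paths that used to be compiled out under
     * CONFIG_HARDEN_EL2_VECTORS can instead be gated at runtime on the
     * capability, which is detected only on the affected cores.
     */
    static inline bool need_fixed_el2_vectors(void)
    {
    	return cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS);
    }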
@@ -1182,22 +1182,6 @@ config HARDEN_BRANCH_PREDICTOR
 	  If unsure, say Y.
 
-config HARDEN_EL2_VECTORS
-	bool "Harden EL2 vector mapping against system register leak" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors can
-	  be used to leak privileged information such as the vector base
-	  register, resulting in a potential defeat of the EL2 layout
-	  randomization.
-
-	  This config option will map the vectors to a fixed location,
-	  independent of the EL2 code mapping, so that revealing VBAR_EL2
-	  to an attacker does not give away any extra information. This
-	  only gets enabled on affected CPUs.
-
-	  If unsure, say Y.
-
 config ARM64_SSBD
 	bool "Speculative Store Bypass Disable" if EXPERT
 	default y
@@ -42,12 +42,10 @@ struct bp_hardening_data {
 	bp_hardening_cb_t fn;
 };
 
-#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
-     defined(CONFIG_HARDEN_EL2_VECTORS))
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
 extern char __bp_harden_hyp_vecs[];
 extern atomic_t arm64_el2_vector_last_slot;
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
+#endif
 
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
@@ -635,7 +635,7 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 	return is_midr_in_range(midr, &range) && has_dic;
 }
 
-#if defined(CONFIG_HARDEN_EL2_VECTORS)
+#ifdef CONFIG_RANDOMIZE_BASE
 static const struct midr_range ca57_a72[] = {
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -880,7 +880,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = check_branch_predictor,
 	},
-#ifdef CONFIG_HARDEN_EL2_VECTORS
+#ifdef CONFIG_RANDOMIZE_BASE
 	{
 		.desc = "EL2 vector hardening",
 		.capability = ARM64_HARDEN_EL2_VECTORS,
@@ -58,7 +58,7 @@ config KVM_ARM_PMU
 	  virtual machines.
 
 config KVM_INDIRECT_VECTORS
-	def_bool HARDEN_BRANCH_PREDICTOR || HARDEN_EL2_VECTORS
+	def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
 
 endif # KVM