Commit 9ef2b48b authored by Will Deacon

KVM: arm64: Allow patching EL2 vectors even when KASLR is not enabled

Patching the EL2 exception vectors is integral to the Spectre-v2
workaround, where it can be necessary to execute CPU-specific sequences
to nobble the branch predictor before running the hypervisor text proper.

Remove the dependency on CONFIG_RANDOMIZE_BASE and allow the EL2 vectors
to be patched even when KASLR is not enabled.
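
For background, the slot arithmetic the workaround relies on can be modelled as in the sketch below. This is an illustrative, user-space model only (not part of this patch): names such as bp_hardening_data_model and pick_hyp_vectors are hypothetical, and the 2KiB slot size and four-slot layout are assumptions that mirror the __bp_harden_hyp_vecs code touched here.

	/*
	 * Illustrative user-space model (not kernel code) of how a per-CPU
	 * hardened-vector slot maps to the address programmed into VBAR_EL2.
	 * Assumptions: 2KiB per slot and BP_HARDEN_EL2_SLOTS slots, matching
	 * the arm64 __bp_harden_hyp_vecs layout; all names are hypothetical.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define SZ_2K			2048u
	#define BP_HARDEN_EL2_SLOTS	4

	struct bp_hardening_data_model {
		int hyp_vectors_slot;	/* -1: no hardened slot, use plain vectors */
	};

	/* Pick the vector base for this CPU: either the plain vectors or
	 * slot N of the hardened vectors page. */
	static uintptr_t pick_hyp_vectors(uintptr_t plain_vectors,
					  uintptr_t harden_vecs_base,
					  const struct bp_hardening_data_model *d)
	{
		if (d->hyp_vectors_slot < 0 ||
		    d->hyp_vectors_slot >= BP_HARDEN_EL2_SLOTS)
			return plain_vectors;
		return harden_vecs_base + (uintptr_t)d->hyp_vectors_slot * SZ_2K;
	}

	int main(void)
	{
		struct bp_hardening_data_model cpu = { .hyp_vectors_slot = 1 };
		/* Fake addresses, purely for demonstration. */
		uintptr_t plain = 0x40100000u, hardened = 0x40200000u;

		printf("VBAR_EL2 = %#lx\n",
		       (unsigned long)pick_hyp_vectors(plain, hardened, &cpu));
		return 0;
	}

With slot 1 the sketch prints the hardened base plus 2KiB, the same base-plus-slot arithmetic the hypervisor vector selection applies to the real __bp_harden_hyp_vecs mapping.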

Fixes: 7a132017e7a5 ("KVM: arm64: Replace CONFIG_KVM_INDIRECT_VECTORS with CONFIG_RANDOMIZE_BASE")
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/r/202009221053.Jv1XsQUZ%lkp@intel.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 31c84d6c
@@ -99,11 +99,9 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init)
 #define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector)
-#ifdef CONFIG_RANDOMIZE_BASE
 extern atomic_t arm64_el2_vector_last_slot;
 DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
-#endif
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
......
@@ -9,6 +9,7 @@
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/cpufeature.h>
 /*
@@ -430,7 +431,6 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 	return ret;
 }
-#ifdef CONFIG_RANDOMIZE_BASE
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
  * depending on the kernel configuration and CPU present:
@@ -451,12 +451,9 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
  * VHE, as we don't have hypervisor-specific mappings. If the system
  * is VHE and yet selects this capability, it will be ignored.
  */
-#include <asm/mmu.h>
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
-/* This is called on both VHE and !VHE systems */
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
@@ -480,52 +477,6 @@ static inline void *kvm_get_hyp_vector(void)
 	return vect;
 }
-/* This is only called on a !VHE system */
-static inline int kvm_map_vectors(void)
-{
-	/*
-	 * SV2 = ARM64_SPECTRE_V2
-	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
-	 *
-	 * !SV2 + !HEL2 -> use direct vectors
-	 *  SV2 + !HEL2 -> use hardened vectors in place
-	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
-	 *  SV2 +  HEL2 -> use hardened vertors and use exec mapping
-	 */
-	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
-		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
-		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
-	}
-	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-		/*
-		 * Always allocate a spare vector slot, as we don't
-		 * know yet which CPUs have a BP hardening slot that
-		 * we can reuse.
-		 */
-		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
-		return create_hyp_exec_mappings(vect_pa, size,
-						&__kvm_bp_vect_base);
-	}
-	return 0;
-}
-#else
-static inline void *kvm_get_hyp_vector(void)
-{
-	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
-}
-static inline int kvm_map_vectors(void)
-{
-	return 0;
-}
-#endif
 #define kvm_phys_to_vttbr(addr)	phys_to_ttbr(addr)
 /*
......
@@ -177,7 +177,6 @@ enum mitigation_state arm64_get_spectre_v2_state(void)
 }
 #ifdef CONFIG_KVM
-#ifdef CONFIG_RANDOMIZE_BASE
 #include <asm/cacheflush.h>
 #include <asm/kvm_asm.h>
@@ -235,7 +234,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn)
 {
 	__this_cpu_write(bp_hardening_data.fn, fn);
 }
-#endif /* CONFIG_RANDOMIZE_BASE */
 #endif /* CONFIG_KVM */
 static void call_smc_arch_workaround_1(void)
......
@@ -1256,6 +1256,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }
+static int kvm_map_vectors(void)
+{
+	/*
+	 * SV2 = ARM64_SPECTRE_V2
+	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
+	 *
+	 * !SV2 + !HEL2 -> use direct vectors
+	 *  SV2 + !HEL2 -> use hardened vectors in place
+	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+	 */
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+	}
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+		/*
+		 * Always allocate a spare vector slot, as we don't
+		 * know yet which CPUs have a BP hardening slot that
+		 * we can reuse.
+		 */
+		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+		return create_hyp_exec_mappings(vect_pa, size,
+						&__kvm_bp_vect_base);
+	}
+	return 0;
+}
 static void cpu_init_hyp_mode(void)
 {
 	phys_addr_t pgd_ptr;
......
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
		    -DDISABLE_BRANCH_PROFILING \
		    $(DISABLE_STACKLEAK_PLUGIN)
-obj-$(CONFIG_KVM) += vhe/ nvhe/
-obj-$(CONFIG_RANDOMIZE_BASE) += smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o
@@ -259,7 +259,6 @@ SYM_CODE_START(__kvm_hyp_vector)
 	valid_vect	el1_error		// Error 32-bit EL1
 SYM_CODE_END(__kvm_hyp_vector)
-#ifdef CONFIG_RANDOMIZE_BASE
 .macro hyp_ventry
 	.align 7
 1:	esb
@@ -309,4 +308,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
 1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
 	.org 1b
 SYM_CODE_END(__bp_harden_hyp_vecs)
-#endif