Commit 07cf8aa9 authored by Will Deacon, committed by Marc Zyngier

KVM: arm64: Make BP hardening globals static instead

Branch predictor hardening of the hyp vectors is partially driven by a
couple of global variables ('__kvm_bp_vect_base' and
'__kvm_harden_el2_vector_slot'). However, these are only used within a
single compilation unit, so internalise them there instead.
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20201113113847.21619-5-will@kernel.org
parent 042c76a9
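
The change is purely one of C linkage. As a minimal standalone sketch (the file name, symbol names and values below are illustrative, not the actual KVM code), a variable that used to be exported through an extern declaration in a shared header can instead be defined with internal linkage in the one translation unit that uses it:

/* linkage_sketch.c -- illustrative only; names are hypothetical, not KVM code. */
#include <stdio.h>

/*
 * Old pattern: the symbol had external linkage and a matching
 * "extern void *bp_vect_base;" declaration lived in a shared header,
 * even though only this file ever referenced it.
 *
 * New pattern: 'static' gives the symbol internal linkage, keeping it
 * private to this translation unit, so the header declaration goes away.
 */
static void *bp_vect_base;		/* stand-in for the trampoline page VA */
static int harden_el2_vector_slot;	/* stand-in for the vector slot index */

static void select_vectors(void *base, int slot)
{
	bp_vect_base = base;
	harden_el2_vector_slot = slot;
}

int main(void)
{
	int page;			/* stand-in for the trampoline page */

	select_vectors(&page, 4);
	printf("base=%p slot=%d\n", bp_vect_base, harden_el2_vector_slot);
	return 0;
}

With internal linkage, no other object file can reference or link against the symbol, which is the effect the commit achieves for __kvm_bp_vect_base and __kvm_harden_el2_vector_slot without changing any behaviour.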
arch/arm64/include/asm/kvm_mmu.h
@@ -208,9 +208,6 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 	return ret;
 }
 
-extern void *__kvm_bp_vect_base;
-extern int __kvm_harden_el2_vector_slot;
-
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
 
 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
arch/arm64/kvm/arm.c
@@ -51,6 +51,14 @@ DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 
+/* Hypervisor VA of the indirect vector trampoline page */
+static void *__kvm_bp_vect_base;
+/*
+ * Slot in the hyp vector page for use by the indirect vector trampoline
+ * when mitigation against Spectre-v2 is not required.
+ */
+static int __kvm_harden_el2_vector_slot;
+
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
arch/arm64/kvm/va_layout.c
@@ -131,9 +131,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 	}
 }
 
-void *__kvm_bp_vect_base;
-int __kvm_harden_el2_vector_slot;
-
 void kvm_patch_vector_branch(struct alt_instr *alt,
 			     __le32 *origptr, __le32 *updptr, int nr_inst)
 {