Commit c199a009 authored by Marc Zyngier

Merge branch 'kvm-arm64/el2-obj-v4.1' into kvmarm-master/next-WIP

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 9ebcfadb 6de7dd31
@@ -42,22 +42,70 @@
 #include <linux/mm.h>
 
-/* Translate a kernel address of @sym into its equivalent linear mapping */
-#define kvm_ksym_ref(sym)						\
+/*
+ * Translate name of a symbol defined in nVHE hyp to the name seen
+ * by kernel proper. All nVHE symbols are prefixed by the build system
+ * to avoid clashes with the VHE variants.
+ */
+#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
+
+#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
+#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
+
+/*
+ * Define a pair of symbols sharing the same name but one defined in
+ * VHE and the other in nVHE hyp implementations.
+ */
+#define DECLARE_KVM_HYP_SYM(sym)		\
+	DECLARE_KVM_VHE_SYM(sym);		\
+	DECLARE_KVM_NVHE_SYM(sym)
+
+#define CHOOSE_VHE_SYM(sym)	sym
+#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
+
+#ifndef __KVM_NVHE_HYPERVISOR__
+/*
+ * BIG FAT WARNINGS:
+ *
+ * - Don't be tempted to change the following is_kernel_in_hyp_mode()
+ *   to has_vhe(). has_vhe() is implemented as a *final* capability,
+ *   while this is used early at boot time, when the capabilities are
+ *   not final yet....
+ *
+ * - Don't let the nVHE hypervisor have access to this, as it will
+ *   pick the *wrong* symbol (yes, it runs at EL2...).
+ */
+#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
+						   : CHOOSE_NVHE_SYM(sym))
+#else
+/* The nVHE hypervisor shouldn't even try to access anything */
+extern void *__nvhe_undefined_symbol;
+#define CHOOSE_HYP_SYM(sym)	__nvhe_undefined_symbol
+#endif
+
+/* Translate a kernel address @ptr into its equivalent linear mapping */
+#define kvm_ksym_ref(ptr)						\
 ({									\
-	void *val = &sym;						\
+	void *val = (ptr);						\
 	if (!is_kernel_in_hyp_mode())					\
-		val = lm_alias(&sym);					\
+		val = lm_alias((ptr));					\
 	val;								\
  })
+#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
 
 struct kvm;
 struct kvm_vcpu;
 
-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
+DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
+DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
+#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
+#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
-extern char __kvm_hyp_vector[];
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+extern atomic_t arm64_el2_vector_last_slot;
+DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
+#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
+#endif
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
@@ -66,9 +114,7 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 
-extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);
-
-extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern void __kvm_enable_ssbs(void);
...
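As a rough illustration of how the selection macros above are meant to be consumed by kernel-proper code (sketch only; the symbol name my_hyp_counter below is hypothetical and not part of this series):

	/* Hypothetical example: a symbol provided by both hyp objects. The
	 * nVHE copy is emitted by the build system as __kvm_nvhe_my_hyp_counter. */
	DECLARE_KVM_HYP_SYM(my_hyp_counter);
	#define my_hyp_counter CHOOSE_HYP_SYM(my_hyp_counter)

	/* Kernel-proper code then picks the right object at run time: the plain
	 * (VHE) symbol when the kernel itself runs at EL2, otherwise the
	 * __kvm_nvhe_-prefixed one, translated through the linear map. */
	static inline void *my_hyp_counter_addr(void)
	{
		return kvm_ksym_ref(my_hyp_counter);
	}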
@@ -516,7 +516,7 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
  * Skip an instruction which has been emulated at hyp while most guest sysregs
  * are live.
  */
-static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
...
@@ -338,7 +338,7 @@ struct kvm_vcpu_arch {
 	struct vcpu_reset_state	reset_state;
 
 	/* True when deferrable sysregs are loaded on the physical CPU,
-	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
+	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
 	bool sysregs_loaded_on_cpu;
 
 	/* Guest PV state */
@@ -448,6 +448,18 @@ void kvm_arm_resume_guest(struct kvm *kvm);
 
 u64 __kvm_call_hyp(void *hypfn, ...);
 
+#define kvm_call_hyp_nvhe(f, ...)					\
+	do {								\
+		DECLARE_KVM_NVHE_SYM(f);				\
+		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
+	} while(0)
+
+#define kvm_call_hyp_nvhe_ret(f, ...)					\
+	({								\
+		DECLARE_KVM_NVHE_SYM(f);				\
+		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
+	})
+
 /*
  * The couple of isb() below are there to guarantee the same behaviour
  * on VHE as on !VHE, where the eret to EL1 acts as a context
@@ -459,7 +471,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 			f(__VA_ARGS__);					\
 			isb();						\
 		} else {						\
-			__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
 		}							\
 	} while(0)
@@ -471,8 +483,7 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 			ret = f(__VA_ARGS__);				\
 			isb();						\
 		} else {						\
-			ret = __kvm_call_hyp(kvm_ksym_ref(f),		\
-					     ##__VA_ARGS__);		\
+			ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);	\
 		}							\
 									\
 		ret;							\
@@ -628,8 +639,8 @@ static inline int kvm_arm_have_ssbd(void)
 	}
 }
 
-void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
-void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
+void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
+void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
 
 int kvm_set_ipa_limit(void);
...
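Roughly, with the hunks above, an existing non-VHE call site such as kvm_call_hyp(__kvm_tlb_flush_vmid, kvm) now goes through kvm_call_hyp_nvhe(), i.e. a simplified sketch of the expansion (VHE fast path omitted):

	/* Sketch of the non-VHE branch of kvm_call_hyp() after this change. */
	DECLARE_KVM_NVHE_SYM(__kvm_tlb_flush_vmid);
	__kvm_call_hyp(kvm_ksym_ref_nvhe(__kvm_tlb_flush_vmid), kvm);
	/* The HVC therefore targets the build-prefixed
	 * __kvm_nvhe___kvm_tlb_flush_vmid copy of the function rather than
	 * the kernel-proper symbol of the same name. */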
@@ -12,8 +12,6 @@
 #include <asm/alternative.h>
 #include <asm/sysreg.h>
 
-#define __hyp_text __section(.hyp.text) notrace __noscs
-
 #define read_sysreg_elx(r,nvh,vh)					\
 	({								\
 		u64 reg;						\
@@ -63,17 +61,20 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
+#ifdef __KVM_NVHE_HYPERVISOR__
 void __timer_enable_traps(struct kvm_vcpu *vcpu);
 void __timer_disable_traps(struct kvm_vcpu *vcpu);
+#endif
 
+#ifdef __KVM_NVHE_HYPERVISOR__
 void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
+#else
 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
-void __sysreg32_save_state(struct kvm_vcpu *vcpu);
-void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
+#endif
 
 void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
 void __debug_switch_to_host(struct kvm_vcpu *vcpu);
@@ -81,11 +82,17 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu);
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
 
+#ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
 void deactivate_traps_vhe_put(void);
+#endif
 
 u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
 
+void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt);
+#ifdef __KVM_NVHE_HYPERVISOR__
 void __noreturn __hyp_do_panic(unsigned long, ...);
+#endif
 
 #endif /* __ARM64_KVM_HYP_H__ */
@@ -45,13 +45,6 @@ struct bp_hardening_data {
 	bp_hardening_cb_t fn;
 };
 
-#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
-     defined(CONFIG_HARDEN_EL2_VECTORS))
-extern char __bp_harden_hyp_vecs[];
-extern atomic_t arm64_el2_vector_last_slot;
-#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
-
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
...
@@ -85,10 +85,17 @@ static inline bool is_kernel_in_hyp_mode(void)
 
 static __always_inline bool has_vhe(void)
 {
-	if (cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN))
+	/*
+	 * The following macros are defined for code specific to VHE/nVHE.
+	 * If has_vhe() is inlined into those compilation units, it can
+	 * be determined statically. Otherwise fall back to caps.
+	 */
+	if (__is_defined(__KVM_VHE_HYPERVISOR__))
 		return true;
-
+	else if (__is_defined(__KVM_NVHE_HYPERVISOR__))
 		return false;
+	else
+		return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
 }
 
 #endif /* __ASSEMBLY__ */
...
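A small sketch of the effect of the has_vhe() change above (the helper below is hypothetical, not part of the patch): in objects built with -D__KVM_VHE_HYPERVISOR__ or -D__KVM_NVHE_HYPERVISOR__ the branch folds to a compile-time constant, while kernel-proper code still consults the final capability.

	static inline int example_hyp_mode(void)	/* hypothetical helper */
	{
		/* VHE hyp object:  compiles down to "return 2"
		 * nVHE hyp object: compiles down to "return 1"
		 * kernel proper:   still reads ARM64_HAS_VIRT_HOST_EXTN */
		return has_vhe() ? 2 : 1;
	}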
@@ -51,4 +51,58 @@ __efistub__ctype = _ctype;
#endif
#ifdef CONFIG_KVM
/*
* KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_, to
* separate it from the kernel proper. The following symbols are legally
* accessed by it, therefore provide aliases to make them linkable.
* Do not include symbols which may not be safely accessed under hypervisor
* memory mappings.
*/
#define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
/* Alternative callbacks for init-time patching of nVHE hyp code. */
KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
KVM_NVHE_ALIAS(kvm_host_data);
KVM_NVHE_ALIAS(kvm_vgic_global_state);
/* Kernel constant needed to compute idmap addresses. */
KVM_NVHE_ALIAS(kimage_voffset);
/* Kernel symbols used to call panic() from nVHE hyp code (via ERET). */
KVM_NVHE_ALIAS(__hyp_panic_string);
KVM_NVHE_ALIAS(panic);
/* Vectors installed by hyp-init on reset HVC. */
KVM_NVHE_ALIAS(__hyp_stub_vectors);
/* IDMAP TCR_EL1.T0SZ as computed by the EL1 init code */
KVM_NVHE_ALIAS(idmap_t0sz);
/* Kernel symbol used by icache_is_vpipt(). */
KVM_NVHE_ALIAS(__icache_flags);
/* Kernel symbols needed for cpus_have_final/const_caps checks. */
KVM_NVHE_ALIAS(arm64_const_caps_ready);
KVM_NVHE_ALIAS(cpu_hwcap_keys);
KVM_NVHE_ALIAS(cpu_hwcaps);
/* Static keys which are set if a vGIC trap should be handled in hyp. */
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
/* Static key checked in pmr_sync(). */
#ifdef CONFIG_ARM64_PSEUDO_NMI
KVM_NVHE_ALIAS(gic_pmr_sync);
#endif
#endif /* CONFIG_KVM */
#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
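To illustrate the alias mechanism above: once every symbol in the nVHE objects has been prefixed, a call to panic() made from hyp code is emitted as a reference to __kvm_nvhe_panic, and the alias resolves it back to the kernel's panic(). For example, KVM_NVHE_ALIAS(panic) expands in the linker script to:

	__kvm_nvhe_panic = panic;

so only the symbols explicitly listed here leak into the hyp namespace.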
@@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/
 kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
 	 $(KVM)/vfio.o $(KVM)/irqchip.o \
 	 arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
-	 inject_fault.o regmap.o va_layout.o hyp.o hyp-init.o handle_exit.o \
+	 inject_fault.o regmap.o va_layout.o hyp.o handle_exit.o \
 	 guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o \
 	 vgic-sys-reg-v3.o fpsimd.o pmu.o \
 	 aarch32.o arch_timer.o \
...
@@ -351,7 +351,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	kvm_vgic_load(vcpu);
 	kvm_timer_vcpu_load(vcpu);
-	kvm_vcpu_load_sysregs(vcpu);
+	if (has_vhe())
+		kvm_vcpu_load_sysregs_vhe(vcpu);
 	kvm_arch_vcpu_load_fp(vcpu);
 	kvm_vcpu_pmu_restore_guest(vcpu);
 	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
@@ -369,7 +370,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_arch_vcpu_put_fp(vcpu);
-	kvm_vcpu_put_sysregs(vcpu);
+	if (has_vhe())
+		kvm_vcpu_put_sysregs_vhe(vcpu);
 	kvm_timer_vcpu_put(vcpu);
 	kvm_vgic_put(vcpu);
 	kvm_vcpu_pmu_restore_host(vcpu);
@@ -748,11 +750,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		trace_kvm_entry(*vcpu_pc(vcpu));
 		guest_enter_irqoff();
 
-		if (has_vhe()) {
-			ret = kvm_vcpu_run_vhe(vcpu);
-		} else {
-			ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
-		}
+		ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		vcpu->stat.exits++;
@@ -1285,7 +1283,7 @@ static void cpu_init_hyp_mode(void)
 	 * so that we can use adr_l to access per-cpu variables in EL2.
 	 */
 	tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
-		     (unsigned long)kvm_ksym_ref(kvm_host_data));
+		     (unsigned long)kvm_ksym_ref(&kvm_host_data));
 
 	pgd_ptr = kvm_mmu_get_httbr();
 	hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
@@ -1306,7 +1304,7 @@ static void cpu_init_hyp_mode(void)
 	 */
 	if (this_cpu_has_cap(ARM64_SSBS) &&
 	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
-		kvm_call_hyp(__kvm_enable_ssbs);
+		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
 	}
 }
...
@@ -3,18 +3,12 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
-ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING \
-		$(DISABLE_STACKLEAK_PLUGIN)
-
-obj-$(CONFIG_KVM) += hyp.o
-
-hyp-y := vgic-v3-sr.o timer-sr.o aarch32.o vgic-v2-cpuif-proxy.o sysreg-sr.o \
-	 debug-sr.o entry.o switch.o fpsimd.o tlb.o hyp-entry.o
-
-# KVM code is run at a different exception code with a different map, so
-# compiler instrumentation that inserts callbacks or checks into the code may
-# cause crashes. Just disable it.
-GCOV_PROFILE	:= n
-KASAN_SANITIZE	:= n
-UBSAN_SANITIZE	:= n
-KCOV_INSTRUMENT	:= n
+incdir := $(srctree)/$(src)/include
+subdir-asflags-y := -I$(incdir)
+subdir-ccflags-y := -I$(incdir)			\
+		    -fno-stack-protector	\
+		    -DDISABLE_BRANCH_PROFILING	\
+		    $(DISABLE_STACKLEAK_PLUGIN)
+
+obj-$(CONFIG_KVM) += vhe/ nvhe/
+obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
...@@ -44,7 +44,7 @@ static const unsigned short cc_map[16] = { ...@@ -44,7 +44,7 @@ static const unsigned short cc_map[16] = {
/* /*
* Check if a trapped instruction should have been executed or not. * Check if a trapped instruction should have been executed or not.
*/ */
bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu) bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
{ {
unsigned long cpsr; unsigned long cpsr;
u32 cpsr_cond; u32 cpsr_cond;
...@@ -93,7 +93,7 @@ bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu) ...@@ -93,7 +93,7 @@ bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
* *
* IT[7:0] -> CPSR[26:25],CPSR[15:10] * IT[7:0] -> CPSR[26:25],CPSR[15:10]
*/ */
static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{ {
unsigned long itbits, cond; unsigned long itbits, cond;
unsigned long cpsr = *vcpu_cpsr(vcpu); unsigned long cpsr = *vcpu_cpsr(vcpu);
...@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu) ...@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
* kvm_skip_instr - skip a trapped instruction and proceed to the next * kvm_skip_instr - skip a trapped instruction and proceed to the next
* @vcpu: The vcpu pointer * @vcpu: The vcpu pointer
*/ */
void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
{ {
u32 pc = *vcpu_pc(vcpu); u32 pc = *vcpu_pc(vcpu);
bool is_thumb; bool is_thumb;
......
...@@ -21,7 +21,6 @@ ...@@ -21,7 +21,6 @@
#define CPU_SP_EL0_OFFSET (CPU_XREG_OFFSET(30) + 8) #define CPU_SP_EL0_OFFSET (CPU_XREG_OFFSET(30) + 8)
.text .text
.pushsection .hyp.text, "ax"
/* /*
* We treat x18 as callee-saved as the host may use it as a platform * We treat x18 as callee-saved as the host may use it as a platform
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
#include <asm/fpsimdmacros.h> #include <asm/fpsimdmacros.h>
.text .text
.pushsection .hyp.text, "ax"
SYM_FUNC_START(__fpsimd_save_state) SYM_FUNC_START(__fpsimd_save_state)
fpsimd_save x0, 1 fpsimd_save x0, 1
......
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
#include <asm/mmu.h> #include <asm/mmu.h>
.text .text
.pushsection .hyp.text, "ax"
.macro do_el2_call .macro do_el2_call
/* /*
...@@ -40,6 +39,7 @@ el1_sync: // Guest trapped into EL2 ...@@ -40,6 +39,7 @@ el1_sync: // Guest trapped into EL2
ccmp x0, #ESR_ELx_EC_HVC32, #4, ne ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
b.ne el1_trap b.ne el1_trap
#ifdef __KVM_NVHE_HYPERVISOR__
mrs x1, vttbr_el2 // If vttbr is valid, the guest mrs x1, vttbr_el2 // If vttbr is valid, the guest
cbnz x1, el1_hvc_guest // called HVC cbnz x1, el1_hvc_guest // called HVC
...@@ -74,6 +74,7 @@ el1_sync: // Guest trapped into EL2 ...@@ -74,6 +74,7 @@ el1_sync: // Guest trapped into EL2
eret eret
sb sb
#endif /* __KVM_NVHE_HYPERVISOR__ */
el1_hvc_guest: el1_hvc_guest:
/* /*
...@@ -180,6 +181,7 @@ el2_error: ...@@ -180,6 +181,7 @@ el2_error:
eret eret
sb sb
#ifdef __KVM_NVHE_HYPERVISOR__
SYM_FUNC_START(__hyp_do_panic) SYM_FUNC_START(__hyp_do_panic)
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h) PSR_MODE_EL1h)
...@@ -189,6 +191,7 @@ SYM_FUNC_START(__hyp_do_panic) ...@@ -189,6 +191,7 @@ SYM_FUNC_START(__hyp_do_panic)
eret eret
sb sb
SYM_FUNC_END(__hyp_do_panic) SYM_FUNC_END(__hyp_do_panic)
#endif
SYM_CODE_START(__hyp_panic) SYM_CODE_START(__hyp_panic)
get_host_ctxt x0, x1 get_host_ctxt x0, x1
...@@ -318,20 +321,4 @@ SYM_CODE_START(__bp_harden_hyp_vecs) ...@@ -318,20 +321,4 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ 1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
.org 1b .org 1b
SYM_CODE_END(__bp_harden_hyp_vecs) SYM_CODE_END(__bp_harden_hyp_vecs)
.popsection
SYM_CODE_START(__smccc_workaround_1_smc)
esb
sub sp, sp, #(8 * 4)
stp x2, x3, [sp, #(8 * 0)]
stp x0, x1, [sp, #(8 * 2)]
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
smc #0
ldp x2, x3, [sp, #(8 * 0)]
ldp x0, x1, [sp, #(8 * 2)]
add sp, sp, #(8 * 4)
1: .org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
.org 1b
SYM_CODE_END(__smccc_workaround_1_smc)
#endif #endif
...@@ -4,6 +4,9 @@ ...@@ -4,6 +4,9 @@
* Author: Marc Zyngier <marc.zyngier@arm.com> * Author: Marc Zyngier <marc.zyngier@arm.com>
*/ */
#ifndef __ARM64_KVM_HYP_DEBUG_SR_H__
#define __ARM64_KVM_HYP_DEBUG_SR_H__
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
...@@ -85,53 +88,9 @@ ...@@ -85,53 +88,9 @@
default: write_debug(ptr[0], reg, 0); \ default: write_debug(ptr[0], reg, 0); \
} }
static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1) static inline void __debug_save_state(struct kvm_vcpu *vcpu,
{ struct kvm_guest_debug_arch *dbg,
u64 reg; struct kvm_cpu_context *ctxt)
/* Clear pmscr in case of early return */
*pmscr_el1 = 0;
/* SPE present on this CPU? */
if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
ID_AA64DFR0_PMSVER_SHIFT))
return;
/* Yes; is it owned by EL3? */
reg = read_sysreg_s(SYS_PMBIDR_EL1);
if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
return;
/* No; is the host actually using the thing? */
reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
return;
/* Yes; save the control register and disable data generation */
*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
write_sysreg_s(0, SYS_PMSCR_EL1);
isb();
/* Now drain all buffered data to memory */
psb_csync();
dsb(nsh);
}
static void __hyp_text __debug_restore_spe_nvhe(u64 pmscr_el1)
{
if (!pmscr_el1)
return;
/* The host page table is installed, but not yet synchronised */
isb();
/* Re-enable data generation */
write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}
static void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
struct kvm_guest_debug_arch *dbg,
struct kvm_cpu_context *ctxt)
{ {
u64 aa64dfr0; u64 aa64dfr0;
int brps, wrps; int brps, wrps;
...@@ -148,9 +107,9 @@ static void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu, ...@@ -148,9 +107,9 @@ static void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1); ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
} }
static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu, static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
struct kvm_guest_debug_arch *dbg, struct kvm_guest_debug_arch *dbg,
struct kvm_cpu_context *ctxt) struct kvm_cpu_context *ctxt)
{ {
u64 aa64dfr0; u64 aa64dfr0;
int brps, wrps; int brps, wrps;
...@@ -168,20 +127,13 @@ static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu, ...@@ -168,20 +127,13 @@ static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1); write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
} }
void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
{ {
struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt; struct kvm_cpu_context *guest_ctxt;
struct kvm_guest_debug_arch *host_dbg; struct kvm_guest_debug_arch *host_dbg;
struct kvm_guest_debug_arch *guest_dbg; struct kvm_guest_debug_arch *guest_dbg;
/*
* Non-VHE: Disable and flush SPE data generation
* VHE: The vcpu can run, but it can't hide.
*/
if (!has_vhe())
__debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return; return;
...@@ -194,16 +146,13 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) ...@@ -194,16 +146,13 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
__debug_restore_state(vcpu, guest_dbg, guest_ctxt); __debug_restore_state(vcpu, guest_dbg, guest_ctxt);
} }
void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu) static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
{ {
struct kvm_cpu_context *host_ctxt; struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt; struct kvm_cpu_context *guest_ctxt;
struct kvm_guest_debug_arch *host_dbg; struct kvm_guest_debug_arch *host_dbg;
struct kvm_guest_debug_arch *guest_dbg; struct kvm_guest_debug_arch *guest_dbg;
if (!has_vhe())
__debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1);
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return; return;
...@@ -218,7 +167,4 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu) ...@@ -218,7 +167,4 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY; vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY;
} }
u32 __hyp_text __kvm_get_mdcr_el2(void) #endif /* __ARM64_KVM_HYP_DEBUG_SR_H__ */
{
return read_sysreg(mdcr_el2);
}
...@@ -4,6 +4,9 @@ ...@@ -4,6 +4,9 @@
* Author: Marc Zyngier <marc.zyngier@arm.com> * Author: Marc Zyngier <marc.zyngier@arm.com>
*/ */
#ifndef __ARM64_KVM_HYP_SYSREG_SR_H__
#define __ARM64_KVM_HYP_SYSREG_SR_H__
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
...@@ -12,30 +15,18 @@ ...@@ -12,30 +15,18 @@
#include <asm/kvm_emulate.h> #include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h> #include <asm/kvm_hyp.h>
/* static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
* Non-VHE: Both host and guest must save everything.
*
* VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
* pstate, which are handled as part of the el2 return state) on every
* switch (sp_el0 is being dealt with in the assembly code).
* tpidr_el0 and tpidrro_el0 only need to be switched when going
* to host userspace or a different VCPU. EL1 registers only need to be
* switched when potentially going to run a different VCPU. The latter two
* classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
*/
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{ {
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1); ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
} }
static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt) static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{ {
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0); ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0); ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
} }
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{ {
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR); ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
...@@ -60,7 +51,7 @@ static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) ...@@ -60,7 +51,7 @@ static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR); ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
} }
static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt) static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{ {
ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR); ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR); ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
...@@ -69,39 +60,18 @@ static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ct ...@@ -69,39 +60,18 @@ static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ct
ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2); ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
} }
void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt) static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
__sysreg_save_el1_state(ctxt);
__sysreg_save_common_state(ctxt);
__sysreg_save_user_state(ctxt);
__sysreg_save_el2_return_state(ctxt);
}
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_save_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_save_common_state(ctxt);
__sysreg_save_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{ {
write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1); write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
} }
static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt) static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{ {
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0); write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0); write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
} }
static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{ {
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2); write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1); write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
...@@ -114,7 +84,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) ...@@ -114,7 +84,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
/* /*
* Must only be done for guest registers, hence the context * Must only be done for guest registers, hence the context
* test. We're coming from the host, so SCTLR.M is already * test. We're coming from the host, so SCTLR.M is already
* set. Pairs with __activate_traps_nvhe(). * set. Pairs with nVHE's __activate_traps().
*/ */
write_sysreg_el1((ctxt->sys_regs[TCR_EL1] | write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
TCR_EPD1_MASK | TCR_EPD0_MASK), TCR_EPD1_MASK | TCR_EPD0_MASK),
...@@ -142,7 +112,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) ...@@ -142,7 +112,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
ctxt->__hyp_running_vcpu) { ctxt->__hyp_running_vcpu) {
/* /*
* Must only be done for host registers, hence the context * Must only be done for host registers, hence the context
* test. Pairs with __deactivate_traps_nvhe(). * test. Pairs with nVHE's __deactivate_traps().
*/ */
isb(); isb();
/* /*
...@@ -160,8 +130,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) ...@@ -160,8 +130,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR); write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
} }
static void __hyp_text static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{ {
u64 pstate = ctxt->gp_regs.regs.pstate; u64 pstate = ctxt->gp_regs.regs.pstate;
u64 mode = pstate & PSR_AA32_MODE_MASK; u64 mode = pstate & PSR_AA32_MODE_MASK;
...@@ -187,28 +156,7 @@ __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt) ...@@ -187,28 +156,7 @@ __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2); write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
} }
void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt) static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
__sysreg_restore_el1_state(ctxt);
__sysreg_restore_common_state(ctxt);
__sysreg_restore_user_state(ctxt);
__sysreg_restore_el2_return_state(ctxt);
}
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_restore_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_restore_common_state(ctxt);
__sysreg_restore_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{ {
u64 *spsr, *sysreg; u64 *spsr, *sysreg;
...@@ -230,7 +178,7 @@ void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) ...@@ -230,7 +178,7 @@ void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2); sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
} }
void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu) static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{ {
u64 *spsr, *sysreg; u64 *spsr, *sysreg;
...@@ -252,82 +200,4 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu) ...@@ -252,82 +200,4 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2); write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
} }
/** #endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */
* kvm_vcpu_load_sysregs - Load guest system registers to the physical CPU
*
* @vcpu: The VCPU pointer
*
* Load system registers that do not affect the host's execution, for
* example EL1 system registers on a VHE system where the host kernel
* runs at EL2. This function is called from KVM's vcpu_load() function
* and loading system register state early avoids having to load them on
* every entry to the VM.
*/
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__sysreg_save_user_state(host_ctxt);
/*
* Load guest EL1 and user state
*
* We must restore the 32-bit state before the sysregs, thanks
* to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
*/
__sysreg32_restore_state(vcpu);
__sysreg_restore_user_state(guest_ctxt);
__sysreg_restore_el1_state(guest_ctxt);
vcpu->arch.sysregs_loaded_on_cpu = true;
activate_traps_vhe_load(vcpu);
}
/**
* kvm_vcpu_put_sysregs - Restore host system registers to the physical CPU
*
* @vcpu: The VCPU pointer
*
* Save guest system registers that do not affect the host's execution, for
* example EL1 system registers on a VHE system where the host kernel
* runs at EL2. This function is called from KVM's vcpu_put() function
* and deferring saving system register state until we're no longer running the
* VCPU avoids having to save them on every exit from the VM.
*/
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();
__sysreg_save_el1_state(guest_ctxt);
__sysreg_save_user_state(guest_ctxt);
__sysreg32_save_state(vcpu);
/* Restore host user state */
__sysreg_restore_user_state(host_ctxt);
vcpu->arch.sysregs_loaded_on_cpu = false;
}
void __hyp_text __kvm_enable_ssbs(void)
{
u64 tmp;
asm volatile(
"mrs %0, sctlr_el2\n"
"orr %0, %0, %1\n"
"msr sctlr_el2, %0"
: "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
}
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module, HYP/nVHE part
#
asflags-y := -D__KVM_NVHE_HYPERVISOR__
ccflags-y := -D__KVM_NVHE_HYPERVISOR__
obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o
obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o
obj-y := $(patsubst %.o,%.hyp.o,$(obj-y))
extra-y := $(patsubst %.hyp.o,%.hyp.tmp.o,$(obj-y))
$(obj)/%.hyp.tmp.o: $(src)/%.c FORCE
$(call if_changed_rule,cc_o_c)
$(obj)/%.hyp.tmp.o: $(src)/%.S FORCE
$(call if_changed_rule,as_o_S)
$(obj)/%.hyp.o: $(obj)/%.hyp.tmp.o FORCE
$(call if_changed,hypcopy)
quiet_cmd_hypcopy = HYPCOPY $@
cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ \
--rename-section=.text=.hyp.text \
$< $@
# Remove ftrace and Shadow Call Stack CFLAGS.
# This is equivalent to the 'notrace' and '__noscs' annotations.
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
# KVM nVHE code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
# cause crashes. Just disable it.
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
# Skip objtool checking for this directory because nVHE code is compiled with
# non-standard build rules.
OBJECT_FILES_NON_STANDARD := y
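Concretely, the HYPCOPY step above is roughly equivalent to running, per object (file names illustrative, derived from the pattern rules):

	$(OBJCOPY) --prefix-symbols=__kvm_nvhe_ \
		   --rename-section=.text=.hyp.text \
		   switch.hyp.tmp.o switch.hyp.o

which gives every symbol in the nVHE object the __kvm_nvhe_ prefix and moves its code into the .hyp.text section, keeping it from colliding with the VHE build of the same sources.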
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <hyp/debug-sr.h>
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
static void __debug_save_spe(u64 *pmscr_el1)
{
u64 reg;
/* Clear pmscr in case of early return */
*pmscr_el1 = 0;
/* SPE present on this CPU? */
if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
ID_AA64DFR0_PMSVER_SHIFT))
return;
/* Yes; is it owned by EL3? */
reg = read_sysreg_s(SYS_PMBIDR_EL1);
if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
return;
/* No; is the host actually using the thing? */
reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
return;
/* Yes; save the control register and disable data generation */
*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
write_sysreg_s(0, SYS_PMSCR_EL1);
isb();
/* Now drain all buffered data to memory */
psb_csync();
dsb(nsh);
}
static void __debug_restore_spe(u64 pmscr_el1)
{
if (!pmscr_el1)
return;
/* The host page table is installed, but not yet synchronised */
isb();
/* Re-enable data generation */
write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
{
/* Disable and flush SPE data generation */
__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
__debug_switch_to_guest_common(vcpu);
}
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
__debug_switch_to_host_common(vcpu);
}
u32 __kvm_get_mdcr_el2(void)
{
return read_sysreg(mdcr_el2);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
#include <kvm/arm_psci.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
___activate_traps(vcpu);
__activate_traps_common(vcpu);
val = CPTR_EL2_DEFAULT;
val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
if (!update_fp_enabled(vcpu)) {
val |= CPTR_EL2_TFP;
__activate_traps_fpsimd32(vcpu);
}
write_sysreg(val, cptr_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
isb();
/*
* At this stage, and thanks to the above isb(), S2 is
* configured and enabled. We can now restore the guest's S1
* configuration: SCTLR, and only then TCR.
*/
write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
isb();
write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
}
}
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
u64 mdcr_el2;
___deactivate_traps(vcpu);
mdcr_el2 = read_sysreg(mdcr_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;
/*
* Set the TCR and SCTLR registers in the exact opposite
* sequence as __activate_traps (first prevent walks,
* then force the MMU on). A generous sprinkling of isb()
* ensure that things happen in this exact order.
*/
val = read_sysreg_el1(SYS_TCR);
write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
isb();
val = read_sysreg_el1(SYS_SCTLR);
write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
isb();
}
__deactivate_traps_common();
mdcr_el2 &= MDCR_EL2_HPMN_MASK;
mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
write_sysreg(mdcr_el2, mdcr_el2);
write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
static void __deactivate_vm(struct kvm_vcpu *vcpu)
{
write_sysreg(0, vttbr_el2);
}
/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
}
}
/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}
}
/**
* Disable host events, enable guest events
*/
static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
struct kvm_host_data *host;
struct kvm_pmu_events *pmu;
host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
pmu = &host->pmu_events;
if (pmu->events_host)
write_sysreg(pmu->events_host, pmcntenclr_el0);
if (pmu->events_guest)
write_sysreg(pmu->events_guest, pmcntenset_el0);
return (pmu->events_host || pmu->events_guest);
}
/**
* Disable guest events, enable host events
*/
static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
struct kvm_host_data *host;
struct kvm_pmu_events *pmu;
host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
pmu = &host->pmu_events;
if (pmu->events_guest)
write_sysreg(pmu->events_guest, pmcntenclr_el0);
if (pmu->events_host)
write_sysreg(pmu->events_host, pmcntenset_el0);
}
/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
bool pmu_switch_needed;
u64 exit_code;
/*
* Having IRQs masked via PMR when entering the guest means the GIC
* will not signal the CPU of interrupts of lower priority, and the
* only way to get out will be via guest exceptions.
* Naturally, we want to avoid this.
*/
if (system_uses_irq_prio_masking()) {
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
pmr_sync();
}
vcpu = kern_hyp_va(vcpu);
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
__sysreg_save_state_nvhe(host_ctxt);
/*
* We must restore the 32-bit state before the sysregs, thanks
* to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
*
* Also, and in order to be able to deal with erratum #1319537 (A57)
* and #1319367 (A72), we must ensure that all VM-related sysreg are
* restored before we enable S2 translation.
*/
__sysreg32_restore_state(vcpu);
__sysreg_restore_state_nvhe(guest_ctxt);
__activate_vm(kern_hyp_va(vcpu->kvm));
__activate_traps(vcpu);
__hyp_vgic_restore_state(vcpu);
__timer_enable_traps(vcpu);
__debug_switch_to_guest(vcpu);
__set_guest_arch_workaround_state(vcpu);
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt);
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
__set_host_arch_workaround_state(vcpu);
__sysreg_save_state_nvhe(guest_ctxt);
__sysreg32_save_state(vcpu);
__timer_disable_traps(vcpu);
__hyp_vgic_save_state(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
__sysreg_restore_state_nvhe(host_ctxt);
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
__fpsimd_save_fpexc32(vcpu);
/*
* This must come after restoring the host sysregs, since a non-VHE
* system may enable SPE here and make use of the TTBRs.
*/
__debug_switch_to_host(vcpu);
if (pmu_switch_needed)
__pmu_switch_to_host(host_ctxt);
/* Returning to host will clear PSR.I, remask PMR if needed */
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQOFF);
return exit_code;
}
void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
u64 spsr = read_sysreg_el2(SYS_SPSR);
u64 elr = read_sysreg_el2(SYS_ELR);
u64 par = read_sysreg(par_el1);
struct kvm_vcpu *vcpu = host_ctxt->__hyp_running_vcpu;
unsigned long str_va;
if (read_sysreg(vttbr_el2)) {
__timer_disable_traps(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
__sysreg_restore_state_nvhe(host_ctxt);
}
/*
* Force the panic string to be loaded from the literal pool,
* making sure it is a kernel address and not a PC-relative
* reference.
*/
asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));
__hyp_do_panic(str_va,
spsr, elr,
read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
read_sysreg(hpfar_el2), par, vcpu);
unreachable();
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <hyp/sysreg-sr.h>
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
/*
* Non-VHE: Both host and guest must save everything.
*/
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
{
__sysreg_save_el1_state(ctxt);
__sysreg_save_common_state(ctxt);
__sysreg_save_user_state(ctxt);
__sysreg_save_el2_return_state(ctxt);
}
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
__sysreg_restore_el1_state(ctxt);
__sysreg_restore_common_state(ctxt);
__sysreg_restore_user_state(ctxt);
__sysreg_restore_el2_return_state(ctxt);
}
void __kvm_enable_ssbs(void)
{
u64 tmp;
asm volatile(
"mrs %0, sctlr_el2\n"
"orr %0, %0, %1\n"
"msr sctlr_el2, %0"
: "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
}
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
#include <asm/kvm_hyp.h> #include <asm/kvm_hyp.h>
void __hyp_text __kvm_timer_set_cntvoff(u64 cntvoff) void __kvm_timer_set_cntvoff(u64 cntvoff)
{ {
write_sysreg(cntvoff, cntvoff_el2); write_sysreg(cntvoff, cntvoff_el2);
} }
...@@ -19,7 +19,7 @@ void __hyp_text __kvm_timer_set_cntvoff(u64 cntvoff) ...@@ -19,7 +19,7 @@ void __hyp_text __kvm_timer_set_cntvoff(u64 cntvoff)
* Should only be called on non-VHE systems. * Should only be called on non-VHE systems.
* VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe(). * VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe().
*/ */
void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu) void __timer_disable_traps(struct kvm_vcpu *vcpu)
{ {
u64 val; u64 val;
...@@ -33,7 +33,7 @@ void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu) ...@@ -33,7 +33,7 @@ void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
* Should only be called on non-VHE systems. * Should only be called on non-VHE systems.
* VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe(). * VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe().
*/ */
void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu) void __timer_enable_traps(struct kvm_vcpu *vcpu)
{ {
u64 val; u64 val;
......
...@@ -4,64 +4,15 @@ ...@@ -4,64 +4,15 @@
* Author: Marc Zyngier <marc.zyngier@arm.com> * Author: Marc Zyngier <marc.zyngier@arm.com>
*/ */
#include <linux/irqflags.h>
#include <asm/kvm_hyp.h> #include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h> #include <asm/kvm_mmu.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
struct tlb_inv_context { struct tlb_inv_context {
unsigned long flags;
u64 tcr; u64 tcr;
u64 sctlr;
}; };
static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm, static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
struct tlb_inv_context *cxt)
{
u64 val;
local_irq_save(cxt->flags);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
/*
* For CPUs that are affected by ARM errata 1165522 or 1530923,
* we cannot trust stage-1 to be in a correct state at that
* point. Since we do not want to force a full load of the
* vcpu state, we prevent the EL1 page-table walker to
* allocate new TLBs. This is done by setting the EPD bits
* in the TCR_EL1 register. We also need to prevent it to
* allocate IPA->PA walks, so we enable the S1 MMU...
*/
val = cxt->tcr = read_sysreg_el1(SYS_TCR);
val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
write_sysreg_el1(val, SYS_TCR);
val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
val |= SCTLR_ELx_M;
write_sysreg_el1(val, SYS_SCTLR);
}
/*
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
* most TLB operations target EL2/EL0. In order to affect the
* guest TLBs (EL1/EL0), we need to change one of these two
* bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
* let's flip TGE before executing the TLB operation.
*
* ARM erratum 1165522 requires some special handling (again),
* as we need to make sure both stages of translation are in
* place before clearing TGE. __load_guest_stage2() already
* has an ISB in order to deal with this.
*/
__load_guest_stage2(kvm);
val = read_sysreg(hcr_el2);
val &= ~HCR_TGE;
write_sysreg(val, hcr_el2);
isb();
}
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
{ {
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val; u64 val;
...@@ -84,37 +35,7 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm, ...@@ -84,37 +35,7 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT)); asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
} }
static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm, static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
struct tlb_inv_context *cxt)
{
if (has_vhe())
__tlb_switch_to_guest_vhe(kvm, cxt);
else
__tlb_switch_to_guest_nvhe(kvm, cxt);
}
static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
{
/*
* We're done with the TLB operation, let's restore the host's
* view of HCR_EL2.
*/
write_sysreg(0, vttbr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
isb();
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
/* Restore the registers to what they were */
write_sysreg_el1(cxt->tcr, SYS_TCR);
write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
}
local_irq_restore(cxt->flags);
}
static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
{ {
write_sysreg(0, vttbr_el2); write_sysreg(0, vttbr_el2);
...@@ -126,16 +47,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm, ...@@ -126,16 +47,7 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
} }
} }
static void __hyp_text __tlb_switch_to_host(struct kvm *kvm, void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
struct tlb_inv_context *cxt)
{
if (has_vhe())
__tlb_switch_to_host_vhe(kvm, cxt);
else
__tlb_switch_to_host_nvhe(kvm, cxt);
}
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{ {
struct tlb_inv_context cxt; struct tlb_inv_context cxt;
...@@ -183,13 +95,13 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) ...@@ -183,13 +95,13 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
* The moral of this story is: if you have a VPIPT I-cache, then * The moral of this story is: if you have a VPIPT I-cache, then
* you should be running with VHE enabled. * you should be running with VHE enabled.
*/ */
if (!has_vhe() && icache_is_vpipt()) if (icache_is_vpipt())
__flush_icache_all(); __flush_icache_all();
__tlb_switch_to_host(kvm, &cxt); __tlb_switch_to_host(kvm, &cxt);
} }
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) void __kvm_tlb_flush_vmid(struct kvm *kvm)
{ {
struct tlb_inv_context cxt; struct tlb_inv_context cxt;
...@@ -206,7 +118,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) ...@@ -206,7 +118,7 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
__tlb_switch_to_host(kvm, &cxt); __tlb_switch_to_host(kvm, &cxt);
} }
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{ {
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
struct tlb_inv_context cxt; struct tlb_inv_context cxt;
...@@ -221,7 +133,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) ...@@ -221,7 +133,7 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
__tlb_switch_to_host(kvm, &cxt); __tlb_switch_to_host(kvm, &cxt);
} }
void __hyp_text __kvm_flush_vm_context(void) void __kvm_flush_vm_context(void)
{ {
dsb(ishst); dsb(ishst);
__tlbi(alle1is); __tlbi(alle1is);
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015-2018 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
/*
* This is not executed directly and is instead copied into the vectors
* by install_bp_hardening_cb().
*/
.data
.pushsection .rodata
.global __smccc_workaround_1_smc
SYM_DATA_START(__smccc_workaround_1_smc)
esb
sub sp, sp, #(8 * 4)
stp x2, x3, [sp, #(8 * 0)]
stp x0, x1, [sp, #(8 * 2)]
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
smc #0
ldp x2, x3, [sp, #(8 * 0)]
ldp x0, x1, [sp, #(8 * 2)]
add sp, sp, #(8 * 4)
1: .org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
.org 1b
SYM_DATA_END(__smccc_workaround_1_smc)
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#include <asm/kvm_hyp.h> #include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h> #include <asm/kvm_mmu.h>
static bool __hyp_text __is_be(struct kvm_vcpu *vcpu) static bool __is_be(struct kvm_vcpu *vcpu)
{ {
if (vcpu_mode_is_32bit(vcpu)) if (vcpu_mode_is_32bit(vcpu))
return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT); return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT);
...@@ -32,7 +32,7 @@ static bool __hyp_text __is_be(struct kvm_vcpu *vcpu) ...@@ -32,7 +32,7 @@ static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
* 0: Not a GICV access * 0: Not a GICV access
* -1: Illegal GICV access successfully performed * -1: Illegal GICV access successfully performed
*/ */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu) int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{ {
struct kvm *kvm = kern_hyp_va(vcpu->kvm); struct kvm *kvm = kern_hyp_va(vcpu->kvm);
struct vgic_dist *vgic = &kvm->arch.vgic; struct vgic_dist *vgic = &kvm->arch.vgic;
......
This diff is collapsed.
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module, HYP/nVHE part
#
asflags-y := -D__KVM_VHE_HYPERVISOR__
ccflags-y := -D__KVM_VHE_HYPERVISOR__
obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o
obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <hyp/debug-sr.h>
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
{
__debug_switch_to_guest_common(vcpu);
}
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
__debug_switch_to_host_common(vcpu);
}
u32 __kvm_get_mdcr_el2(void)
{
return read_sysreg(mdcr_el2);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <hyp/switch.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
#include <kvm/arm_psci.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
___activate_traps(vcpu);
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
val &= ~CPACR_EL1_ZEN;
/*
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
* CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
* except for some missing controls, such as TAM.
* In this case, CPTR_EL2.TAM has the same position with or without
* VHE (HCR.E2H == 1), which allows us to use the CPTR_EL2.TAM
* shift value here to trap AMU accesses.
*/
val |= CPTR_EL2_TAM;
if (update_fp_enabled(vcpu)) {
if (vcpu_has_sve(vcpu))
val |= CPACR_EL1_ZEN;
} else {
val &= ~CPACR_EL1_FPEN;
__activate_traps_fpsimd32(vcpu);
}
write_sysreg(val, cpacr_el1);
write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
NOKPROBE_SYMBOL(__activate_traps);
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
extern char vectors[]; /* kernel exception vectors */
___deactivate_traps(vcpu);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
/*
* ARM errata 1165522 and 1530923 require the actual execution of the
* above before we can switch to the EL2/EL0 translation regime used by
* the host.
*/
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
}
NOKPROBE_SYMBOL(__deactivate_traps);
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
__activate_traps_common(vcpu);
}
void deactivate_traps_vhe_put(void)
{
u64 mdcr_el2 = read_sysreg(mdcr_el2);
mdcr_el2 &= MDCR_EL2_HPMN_MASK |
MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
MDCR_EL2_TPMS;
write_sysreg(mdcr_el2, mdcr_el2);
__deactivate_traps_common();
}
/* Switch to the guest for VHE systems running in EL2 */
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
sysreg_save_host_state_vhe(host_ctxt);
/*
* ARM erratum 1165522 requires us to configure both stage 1 and
* stage 2 translation for the guest context before we clear
* HCR_EL2.TGE.
*
* We have already configured the guest's stage 1 translation in
* kvm_vcpu_load_sysregs_vhe above. We must now call __activate_vm
* before __activate_traps, because __activate_vm configures
* stage 2 translation, and __activate_traps clears HCR_EL2.TGE
* (among other things).
*/
__activate_vm(vcpu->kvm);
__activate_traps(vcpu);
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
__set_guest_arch_workaround_state(vcpu);
do {
/* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt);
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
__set_host_arch_workaround_state(vcpu);
sysreg_save_guest_state_vhe(guest_ctxt);
__deactivate_traps(vcpu);
sysreg_restore_host_state_vhe(host_ctxt);
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
__fpsimd_save_fpexc32(vcpu);
__debug_switch_to_host(vcpu);
return exit_code;
}
NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
int ret;
local_daif_mask();
/*
* Having IRQs masked via PMR when entering the guest means the GIC
* will not signal the CPU of interrupts of lower priority, and the
* only way to get out will be via guest exceptions.
* Naturally, we want to avoid this.
*
* local_daif_mask() already sets GIC_PRIO_PSR_I_SET; we just need a
* dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
*/
pmr_sync();
ret = __kvm_vcpu_run_vhe(vcpu);
/*
* local_daif_restore() takes care to properly restore PSTATE.DAIF
* and the GIC PMR if the host is using IRQ priorities.
*/
local_daif_restore(DAIF_PROCCTX_NOIRQ);
/*
* When we exit from the guest we change a number of CPU configuration
* parameters, such as traps. Make sure these changes take effect
* before running the host or additional guests.
*/
isb();
return ret;
}
static void __hyp_call_panic(u64 spsr, u64 elr, u64 par,
struct kvm_cpu_context *host_ctxt)
{
struct kvm_vcpu *vcpu;
vcpu = host_ctxt->__hyp_running_vcpu;
__deactivate_traps(vcpu);
sysreg_restore_host_state_vhe(host_ctxt);
panic(__hyp_panic_string,
spsr, elr,
read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic);
void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
u64 spsr = read_sysreg_el2(SYS_SPSR);
u64 elr = read_sysreg_el2(SYS_ELR);
u64 par = read_sysreg(par_el1);
__hyp_call_panic(spsr, elr, par, host_ctxt);
unreachable();
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <hyp/sysreg-sr.h>
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
/*
* VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
* pstate, which are handled as part of the el2 return state) on every
* switch (sp_el0 is dealt with in the assembly code).
* tpidr_el0 and tpidrro_el0 only need to be switched when going
* to host userspace or a different VCPU. EL1 registers only need to be
* switched when potentially going to run a different VCPU. The latter two
* classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
*/
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_save_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_save_common_state(ctxt);
__sysreg_save_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_restore_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_restore_common_state(ctxt);
__sysreg_restore_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
/**
* kvm_vcpu_load_sysregs_vhe - Load guest system registers to the physical CPU
*
* @vcpu: The VCPU pointer
*
* Load system registers that do not affect the host's execution, for
* example EL1 system registers on a VHE system where the host kernel
* runs at EL2. This function is called from KVM's vcpu_load() function
* and loading system register state early avoids having to load them on
* every entry to the VM.
*/
void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__sysreg_save_user_state(host_ctxt);
/*
* Load guest EL1 and user state
*
* We must restore the 32-bit state before the sysregs, thanks
* to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
*/
__sysreg32_restore_state(vcpu);
__sysreg_restore_user_state(guest_ctxt);
__sysreg_restore_el1_state(guest_ctxt);
vcpu->arch.sysregs_loaded_on_cpu = true;
activate_traps_vhe_load(vcpu);
}
/**
* kvm_vcpu_put_sysregs_vhe - Restore host system registers to the physical CPU
*
* @vcpu: The VCPU pointer
*
* Save guest system registers that do not affect the host's execution, for
* example EL1 system registers on a VHE system where the host kernel
* runs at EL2. This function is called from KVM's vcpu_put() function
* and deferring saving system register state until we're no longer running the
* VCPU avoids having to save them on every exit from the VM.
*/
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();
__sysreg_save_el1_state(guest_ctxt);
__sysreg_save_user_state(guest_ctxt);
__sysreg32_save_state(vcpu);
/* Restore host user state */
__sysreg_restore_user_state(host_ctxt);
vcpu->arch.sysregs_loaded_on_cpu = false;
}
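As the two kerneldoc comments above describe, the guest's EL1 and user state are loaded and saved around vcpu_load()/vcpu_put() rather than around every guest entry. A minimal, hypothetical illustration of that pairing (the wrapper names are invented for the example; the real callers are KVM's vcpu_load()/vcpu_put() paths):
/* Hypothetical VHE-only illustration of the load/put pairing. */
#include <linux/kvm_host.h>
static void example_vcpu_sched_in(struct kvm_vcpu *vcpu)
{
	/* Guest EL1/user sysregs hit the CPU once per scheduling-in... */
	kvm_vcpu_load_sysregs_vhe(vcpu);
}
static void example_vcpu_sched_out(struct kvm_vcpu *vcpu)
{
	/*
	 * ...and are only saved (and the host's user state restored) when
	 * we stop running this vCPU, not on every exit from the guest.
	 */
	kvm_vcpu_put_sysregs_vhe(vcpu);
}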
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <asm/kvm_hyp.h>
void __kvm_timer_set_cntvoff(u64 cntvoff)
{
write_sysreg(cntvoff, cntvoff_el2);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <linux/irqflags.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
struct tlb_inv_context {
unsigned long flags;
u64 tcr;
u64 sctlr;
};
static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
{
u64 val;
local_irq_save(cxt->flags);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
/*
* For CPUs that are affected by ARM errata 1165522 or 1530923,
* we cannot trust stage-1 to be in a correct state at that
* point. Since we do not want to force a full load of the
* vcpu state, we prevent the EL1 page-table walker from
* allocating new TLBs. This is done by setting the EPD bits
* in the TCR_EL1 register. We also need to prevent it from
* allocating IPA->PA walks, so we enable the S1 MMU...
*/
val = cxt->tcr = read_sysreg_el1(SYS_TCR);
val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
write_sysreg_el1(val, SYS_TCR);
val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
val |= SCTLR_ELx_M;
write_sysreg_el1(val, SYS_SCTLR);
}
/*
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
* most TLB operations target EL2/EL0. In order to affect the
* guest TLBs (EL1/EL0), we need to change one of these two
* bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
* let's flip TGE before executing the TLB operation.
*
* ARM erratum 1165522 requires some special handling (again),
* as we need to make sure both stages of translation are in
* place before clearing TGE. __load_guest_stage2() already
* has an ISB in order to deal with this.
*/
__load_guest_stage2(kvm);
val = read_sysreg(hcr_el2);
val &= ~HCR_TGE;
write_sysreg(val, hcr_el2);
isb();
}
static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
{
/*
* We're done with the TLB operation, let's restore the host's
* view of HCR_EL2.
*/
write_sysreg(0, vttbr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
isb();
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
/* Restore the registers to what they were */
write_sysreg_el1(cxt->tcr, SYS_TCR);
write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
}
local_irq_restore(cxt->flags);
}
void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
struct tlb_inv_context cxt;
dsb(ishst);
/* Switch to requested VMID */
__tlb_switch_to_guest(kvm, &cxt);
/*
* We could do so much better if we had the VA as well.
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
ipa >>= 12;
__tlbi(ipas2e1is, ipa);
/*
* We have to ensure completion of the invalidation at Stage-2,
* since a table walk on another CPU could refill a TLB with a
* complete (S1 + S2) walk based on the old Stage-2 mapping if
* the Stage-1 invalidation happened first.
*/
dsb(ish);
__tlbi(vmalle1is);
dsb(ish);
isb();
__tlb_switch_to_host(kvm, &cxt);
}
void __kvm_tlb_flush_vmid(struct kvm *kvm)
{
struct tlb_inv_context cxt;
dsb(ishst);
/* Switch to requested VMID */
__tlb_switch_to_guest(kvm, &cxt);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
__tlb_switch_to_host(kvm, &cxt);
}
void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
struct tlb_inv_context cxt;
/* Switch to requested VMID */
__tlb_switch_to_guest(kvm, &cxt);
__tlbi(vmalle1);
dsb(nsh);
isb();
__tlb_switch_to_host(kvm, &cxt);
}
void __kvm_flush_vm_context(void)
{
dsb(ishst);
__tlbi(alle1is);
/*
* VIPT and PIPT caches are not affected by VMID, so no maintenance
* is necessary across a VMID rollover.
*
* VPIPT caches constrain lookup and maintenance to the active VMID,
* so we need to invalidate lines with a stale VMID to avoid an ABA
* race after multiple rollovers.
*
*/
if (icache_is_vpipt())
asm volatile("ic ialluis");
dsb(ish);
}
...@@ -109,6 +109,7 @@ static bool is_ignored_symbol(const char *name, char type) ...@@ -109,6 +109,7 @@ static bool is_ignored_symbol(const char *name, char type)
".LASANPC", /* s390 kasan local symbols */ ".LASANPC", /* s390 kasan local symbols */
"__crc_", /* modversions */ "__crc_", /* modversions */
"__efistub_", /* arm64 EFI stub namespace */ "__efistub_", /* arm64 EFI stub namespace */
"__kvm_nvhe_", /* arm64 non-VHE KVM namespace */
NULL NULL
}; };
......
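For context, the prefix table this hunk extends is consumed by a plain prefix match when deciding which symbols to drop from kallsyms. An illustrative sketch of that kind of check (simplified; not the exact scripts/kallsyms.c code):
#include <stdbool.h>
#include <string.h>
/* Simplified stand-in for the ignored-prefix table extended above. */
static const char * const ignored_prefixes[] = {
	"__efistub_",	/* arm64 EFI stub namespace */
	"__kvm_nvhe_",	/* arm64 non-VHE KVM namespace */
	NULL
};
static bool has_ignored_prefix(const char *name)
{
	const char * const *prefix;
	for (prefix = ignored_prefixes; *prefix; prefix++)
		if (!strncmp(name, *prefix, strlen(*prefix)))
			return true;
	return false;
}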