Commit ef3be860 authored by Marc Zyngier

KVM: arm64: Add save/restore support for FPMR

Just like the rest of the FP/SIMD state, FPMR needs to be context
switched.

The only interesting thing here is that we need to treat the pKVM
part a bit differently, as the host FP state is never written back
to the vcpu thread, but instead stored locally and eagerly restored.
Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20240820131802.3547589-5-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 7d9c1ed6
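Before the diff, here is a minimal, self-contained C sketch of the storage scheme this patch introduces, in case the pKVM distinction is not obvious from the hunks alone: non-protected KVM saves the host's FPMR through a hyp-VA pointer into the host thread's storage, while pKVM keeps a private copy in kvm_host_data and restores it eagerly. Only the fpmr/fpmr_ptr names come from the patch; the surrounding struct, functions, and values are invented for illustration and are not kernel code.

```c
/*
 * Standalone sketch (NOT kernel code) of the union added to kvm_host_data:
 * - fpmr_ptr: hyp VA pointer to the host's FPMR storage (non-protected KVM)
 * - fpmr:     local copy kept at EL2 (pKVM, where host state is never
 *             written back to the host thread but restored eagerly)
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct host_data {
	union {
		uint64_t *fpmr_ptr;	/* points into host thread storage */
		uint64_t fpmr;		/* pKVM-only local storage */
	};
};

/* Stand-in for read_sysreg_s(SYS_FPMR). */
static uint64_t read_fpmr(void)
{
	return 0x2a;
}

static void save_host_fpmr(struct host_data *hd, bool protected_kvm)
{
	uint64_t val = read_fpmr();

	if (protected_kvm)
		hd->fpmr = val;		/* keep it locally at EL2 */
	else
		*hd->fpmr_ptr = val;	/* write through to the host thread */
}

int main(void)
{
	uint64_t host_thread_fpmr = 0;
	struct host_data hd = { .fpmr_ptr = &host_thread_fpmr };

	save_host_fpmr(&hd, false);
	printf("non-pKVM: host thread FPMR = 0x%llx\n",
	       (unsigned long long)host_thread_fpmr);

	save_host_fpmr(&hd, true);
	printf("pKVM: local FPMR copy = 0x%llx\n",
	       (unsigned long long)hd.fpmr);
	return 0;
}
```

The two union members are mutually exclusive and are selected at save time by is_protected_kvm_enabled(), which mirrors the kvm_host_data and hyp-main hunks below.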
@@ -599,6 +599,16 @@ struct kvm_host_data {
struct cpu_sve_state *sve_state;
};
union {
/* HYP VA pointer to the host storage for FPMR */
u64 *fpmr_ptr;
/*
* Used by pKVM only, as it needs to provide storage
* for the host
*/
u64 fpmr;
};
/* Ownership of the FP regs */
enum {
FP_STATE_FREE,
...
@@ -63,6 +63,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
*/
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
*host_data_ptr(fpmr_ptr) = kern_hyp_va(&current->thread.uw.fpmr);
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
...
@@ -404,6 +404,9 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
else
__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm)))
write_sysreg_s(__vcpu_sys_reg(vcpu, FPMR), SYS_FPMR);
/* Skip restoring fpexc32 for AArch64 guests */
if (!(read_sysreg(hcr_el2) & HCR_RW))
write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
...
@@ -62,6 +62,8 @@ static void fpsimd_sve_flush(void)
static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
{
bool has_fpmr;
if (!guest_owns_fp_regs())
return;
@@ -73,11 +75,18 @@ static void fpsimd_sve_sync(struct kvm_vcpu *vcpu)
else
__fpsimd_save_state(&vcpu->arch.ctxt.fp_regs);
has_fpmr = kvm_has_fpmr(kern_hyp_va(vcpu->kvm));
if (has_fpmr)
__vcpu_sys_reg(vcpu, FPMR) = read_sysreg_s(SYS_FPMR);
if (system_supports_sve())
__hyp_sve_restore_host();
else
__fpsimd_restore_state(*host_data_ptr(fpsimd_state));
if (has_fpmr)
write_sysreg_s(*host_data_ptr(fpmr), SYS_FPMR);
*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
}
...
@@ -198,6 +198,15 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
} else {
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
}
if (kvm_has_fpmr(kern_hyp_va(vcpu->kvm))) {
u64 val = read_sysreg_s(SYS_FPMR);
if (unlikely(is_protected_kvm_enabled()))
*host_data_ptr(fpmr) = val;
else
**host_data_ptr(fpmr_ptr) = val;
}
}
static const exit_handler_fn hyp_exit_handlers[] = {
...
@@ -312,6 +312,9 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code)
static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
{
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
if (kvm_has_fpmr(vcpu->kvm))
**host_data_ptr(fpmr_ptr) = read_sysreg_s(SYS_FPMR);
}
static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
...