Commit aa4c9185 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Two ARM fixes:

   - Ensure the guest PMU context is restored before the first KVM_RUN,
     fixing an issue where EL0 event counting is broken after vCPU
     save/restore

   - Actually initialize ID_AA64PFR0_EL1.{CSV2,CSV3} based on the
     sanitized, system-wide values for protected VMs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: arm64: Advertise ID_AA64PFR0_EL1.CSV2/3 to protected VMs
  KVM: arm64: PMU: Restore the guest's EL0 event counting after migration
parents 0d3eb744 0bf9601f
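
For readers skimming the first hunk below: CSV2 and CSV3 are 4-bit fields of ID_AA64PFR0_EL1 (bits [59:56] and [63:60] respectively), and the patch packs a 0/1 "unaffected" flag into each with FIELD_PREP(). Here is a minimal standalone C sketch of that mask-and-pack idiom; the field_prep() helper merely mimics the kernel macro for a contiguous mask, and the starting register value is made up:

/*
 * Standalone sketch (not kernel code) of the FIELD_PREP() idiom used in
 * the first hunk: clear the CSV2/CSV3 fields of ID_AA64PFR0_EL1, then
 * pack in a 0/1 "unaffected" flag.
 */
#include <stdint.h>
#include <stdio.h>

#define CSV2_SHIFT 56	/* ID_AA64PFR0_EL1.CSV2 is bits [59:56] */
#define CSV3_SHIFT 60	/* ID_AA64PFR0_EL1.CSV3 is bits [63:60] */
#define FIELD_MASK(shift) ((uint64_t)0xf << (shift))

/* Mimics the kernel's FIELD_PREP() for a contiguous mask. */
static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	/* Shift val up to the field's position and trim it to the mask. */
	return (val << __builtin_ctzll(mask)) & mask;
}

int main(void)
{
	uint64_t pfr0 = 0xff22334455667788ULL;	/* pretend sanitised value */
	uint64_t unaffected = 1;		/* i.e. SPECTRE_UNAFFECTED */

	pfr0 &= ~(FIELD_MASK(CSV2_SHIFT) | FIELD_MASK(CSV3_SHIFT));
	pfr0 |= field_prep(FIELD_MASK(CSV2_SHIFT), unaffected);
	pfr0 |= field_prep(FIELD_MASK(CSV3_SHIFT), unaffected);

	/* Prints 0x1122334455667788: CSV3 = CSV2 = 1, rest untouched. */
	printf("ID_AA64PFR0_EL1 = %#llx\n", (unsigned long long)pfr0);
	return 0;
}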
@@ -1890,9 +1890,33 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
 	return ret;
 }
 
+static u64 get_hyp_id_aa64pfr0_el1(void)
+{
+	/*
+	 * Track whether the system isn't affected by spectre/meltdown in the
+	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
+	 * Although this is per-CPU, we make it global for simplicity, e.g., not
+	 * to have to worry about vcpu migration.
+	 *
+	 * Unlike for non-protected VMs, userspace cannot override this for
+	 * protected VMs.
+	 */
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
+		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
+
+	return val;
+}
+
 static void kvm_hyp_init_symbols(void)
 {
-	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
 	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
 	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
 	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
...
@@ -33,11 +33,14 @@
  * Allow for protected VMs:
  * - Floating-point and Advanced SIMD
  * - Data Independent Timing
+ * - Spectre/Meltdown Mitigation
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
 	)
 
 /*
...
@@ -85,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
 static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 {
-	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
 	u64 set_mask = 0;
 	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
 
 	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
 		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
 
-	/* Spectre and Meltdown mitigation in KVM */
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
-			       (u64)kvm->arch.pfr0_csv2);
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
-			       (u64)kvm->arch.pfr0_csv3);
-
 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
 }
...
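
The return statement in get_pvm_id_aa64pfr0() above is the whole exposure policy: fields in the allow mask pass through from the sanitised system value, everything else reads as zero unless forced on via set_mask. A toy standalone illustration with made-up 8-bit values (not kernel code):

/*
 * Toy illustration of "(sys_val & allow_mask) | set_mask": allowed
 * fields leak through, the rest read as zero unless pinned by set_mask.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sys_val    = 0x21;	/* hypothetical: field1 = 2, field0 = 1 */
	uint64_t allow_mask = 0x0f;	/* only field0 may pass through */
	uint64_t set_mask   = 0x10;	/* force field1 to read as 1 */

	/* Prints 0x11: field0 kept from sys_val, field1 pinned by set_mask. */
	printf("%#llx\n", (unsigned long long)((sys_val & allow_mask) | set_mask));
	return 0;
}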
@@ -558,6 +558,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
...
@@ -794,7 +794,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		if (!kvm_supports_32bit_el0())
 			val |= ARMV8_PMU_PMCR_LC;
 		kvm_pmu_handle_pmcr(vcpu, val);
-		kvm_vcpu_pmu_restore_guest(vcpu);
 	} else {
 		/* PMCR.P & PMCR.C are RAZ */
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
...
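
The last two hunks are one logical move: kvm_vcpu_pmu_restore_guest() used to run only on the guest-trap path in access_pmcr(), so PMCR writes reaching kvm_pmu_handle_pmcr() without a trap (the vCPU save/restore case from the commit message) left the guest's EL0 event counting state stale. Hoisting the call into kvm_pmu_handle_pmcr() makes every PMCR write restore it. A toy standalone model of that call-graph change (every name below is hypothetical; only the kernel functions named in the comments are real):

/*
 * Toy model of the change: the restore moves from the trap-only path
 * into the common PMCR handler, so every caller gets it.
 */
#include <stdbool.h>
#include <stdio.h>

static bool guest_pmu_context_loaded;

static void restore_guest_pmu(void)	/* stands in for kvm_vcpu_pmu_restore_guest() */
{
	guest_pmu_context_loaded = true;
}

static void handle_pmcr_write(void)	/* stands in for kvm_pmu_handle_pmcr() */
{
	/* ...counter reset logic would go here... */
	restore_guest_pmu();		/* now unconditional, for every caller */
}

static void guest_trap_write_pmcr(void)	/* the access_pmcr() trap path */
{
	handle_pmcr_write();
}

static void userspace_restore_pmcr(void)	/* save/restore path, no trap */
{
	handle_pmcr_write();
}

int main(void)
{
	guest_pmu_context_loaded = false;
	userspace_restore_pmcr();	/* previously left the context stale */
	printf("context loaded after restore: %d\n", guest_pmu_context_loaded);

	guest_pmu_context_loaded = false;
	guest_trap_write_pmcr();
	printf("context loaded after trap:    %d\n", guest_pmu_context_loaded);
	return 0;
}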