Commit 0d874858 authored by Oliver Upton

Merge branch kvm-arm64/vm-configuration into kvmarm/next

* kvm-arm64/vm-configuration: (29 commits)
  : VM configuration enforcement, courtesy of Marc Zyngier
  :
  : Userspace has gained the ability to control the features visible
  : through the ID registers, yet KVM didn't take this into account as the
  : effective feature set when determining trap / emulation behavior. This
  : series adds:
  :
  :  - Mechanism for testing the presence of a particular CPU feature in the
  :    guest's ID registers
  :
  :  - Infrastructure for computing the effective value of VNCR-backed
  :    registers, taking into account the RES0 / RES1 bits for a particular
  :    VM configuration
  :
  :  - Implementation of 'fine-grained UNDEF' controls that shadow the FGT
  :    register definitions.
  KVM: arm64: Don't initialize idreg debugfs w/ preemption disabled
  KVM: arm64: Fail the idreg iterator if idregs aren't initialized
  KVM: arm64: Make build-time check of RES0/RES1 bits optional
  KVM: arm64: Add debugfs file for guest's ID registers
  KVM: arm64: Snapshot all non-zero RES0/RES1 sysreg fields for later checking
  KVM: arm64: Make FEAT_MOPS UNDEF if not advertised to the guest
  KVM: arm64: Make AMU sysreg UNDEF if FEAT_AMU is not advertised to the guest
  KVM: arm64: Make PIR{,E0}_EL1 UNDEF if S1PIE is not advertised to the guest
  KVM: arm64: Make TLBI OS/Range UNDEF if not advertised to the guest
  KVM: arm64: Streamline save/restore of HFG[RW]TR_EL2
  KVM: arm64: Move existing feature disabling over to FGU infrastructure
  KVM: arm64: Propagate and handle Fine-Grained UNDEF bits
  KVM: arm64: Add Fine-Grained UNDEF tracking information
  KVM: arm64: Rename __check_nv_sr_forward() to triage_sysreg_trap()
  KVM: arm64: Use the xarray as the primary sysreg/sysinsn walker
  KVM: arm64: Register AArch64 system register entries with the sysreg xarray
  KVM: arm64: Always populate the trap configuration xarray
  KVM: arm64: nv: Move system instructions to their own sys_reg_desc array
  KVM: arm64: Drop the requirement for XARRAY_MULTI
  KVM: arm64: nv: Turn encoding ranges into discrete XArray stores
  ...
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parents a040adfb 5c1ebe9a
...@@ -102,9 +102,7 @@ ...@@ -102,9 +102,7 @@
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC) #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
#define HCRX_GUEST_FLAGS \ #define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
(HCRX_EL2_SMPME | HCRX_EL2_TCR2En | \
(cpus_have_final_cap(ARM64_HAS_MOPS) ? (HCRX_EL2_MSCEn | HCRX_EL2_MCE2) : 0))
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En) #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)
/* TCR_EL2 Registers bits */ /* TCR_EL2 Registers bits */
......
...@@ -238,9 +238,32 @@ static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr) ...@@ -238,9 +238,32 @@ static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
return index; return index;
} }
struct kvm_sysreg_masks;
enum fgt_group_id {
__NO_FGT_GROUP__,
HFGxTR_GROUP,
HDFGRTR_GROUP,
HDFGWTR_GROUP = HDFGRTR_GROUP,
HFGITR_GROUP,
HAFGRTR_GROUP,
/* Must be last */
__NR_FGT_GROUP_IDS__
};
struct kvm_arch { struct kvm_arch {
struct kvm_s2_mmu mmu; struct kvm_s2_mmu mmu;
/*
* Fine-Grained UNDEF, mimicking the FGT layout defined by the
* architecture. We track them globally, as we present the
* same feature-set to all vcpus.
*
* Index 0 is currently spare.
*/
u64 fgu[__NR_FGT_GROUP_IDS__];
/* Interrupt controller */ /* Interrupt controller */
struct vgic_dist vgic; struct vgic_dist vgic;
...@@ -274,6 +297,8 @@ struct kvm_arch { ...@@ -274,6 +297,8 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6 #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
/* Initial ID reg values loaded */ /* Initial ID reg values loaded */
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7 #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
/* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
unsigned long flags; unsigned long flags;
/* VM-wide vCPU feature set */ /* VM-wide vCPU feature set */
...@@ -294,6 +319,9 @@ struct kvm_arch { ...@@ -294,6 +319,9 @@ struct kvm_arch {
/* PMCR_EL0.N value for the guest */ /* PMCR_EL0.N value for the guest */
u8 pmcr_n; u8 pmcr_n;
/* Iterator for idreg debugfs */
u8 idreg_debugfs_iter;
/* Hypercall features firmware registers' descriptor */ /* Hypercall features firmware registers' descriptor */
struct kvm_smccc_features smccc_feat; struct kvm_smccc_features smccc_feat;
struct maple_tree smccc_filter; struct maple_tree smccc_filter;
...@@ -312,6 +340,9 @@ struct kvm_arch { ...@@ -312,6 +340,9 @@ struct kvm_arch {
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1) #define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
u64 id_regs[KVM_ARM_ID_REG_NUM]; u64 id_regs[KVM_ARM_ID_REG_NUM];
/* Masks for VNCR-backed sysregs */
struct kvm_sysreg_masks *sysreg_masks;
/* /*
* For an untrusted host VM, 'pkvm.handle' is used to lookup * For an untrusted host VM, 'pkvm.handle' is used to lookup
* the associated pKVM instance in the hypervisor. * the associated pKVM instance in the hypervisor.
...@@ -474,6 +505,13 @@ enum vcpu_sysreg { ...@@ -474,6 +505,13 @@ enum vcpu_sysreg {
NR_SYS_REGS /* Nothing after this line! */ NR_SYS_REGS /* Nothing after this line! */
}; };
struct kvm_sysreg_masks {
struct {
u64 res0;
u64 res1;
} mask[NR_SYS_REGS - __VNCR_START__];
};
struct kvm_cpu_context { struct kvm_cpu_context {
struct user_pt_regs regs; /* sp = sp_el0 */ struct user_pt_regs regs; /* sp = sp_el0 */
...@@ -549,6 +587,7 @@ struct kvm_vcpu_arch { ...@@ -549,6 +587,7 @@ struct kvm_vcpu_arch {
/* Values of trap registers for the guest. */ /* Values of trap registers for the guest. */
u64 hcr_el2; u64 hcr_el2;
u64 hcrx_el2;
u64 mdcr_el2; u64 mdcr_el2;
u64 cptr_el2; u64 cptr_el2;
...@@ -868,7 +907,15 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r) ...@@ -868,7 +907,15 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r)) #define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
#define __vcpu_sys_reg(v,r) (ctxt_sys_reg(&(v)->arch.ctxt, (r))) u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
#define __vcpu_sys_reg(v,r) \
(*({ \
const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
u64 *__r = __ctxt_sys_reg(ctxt, (r)); \
if (vcpu_has_nv((v)) && (r) >= __VNCR_START__) \
*__r = kvm_vcpu_sanitise_vncr_reg((v), (r)); \
__r; \
}))
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg); u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg); void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
...@@ -1055,14 +1102,20 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu); ...@@ -1055,14 +1102,20 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu); int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu); int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
void kvm_sys_regs_create_debugfs(struct kvm *kvm);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu); void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
int __init kvm_sys_reg_table_init(void); int __init kvm_sys_reg_table_init(void);
struct sys_reg_desc;
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
unsigned int idx);
int __init populate_nv_trap_config(void); int __init populate_nv_trap_config(void);
bool lock_all_vcpus(struct kvm *kvm); bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm); void unlock_all_vcpus(struct kvm *kvm);
void kvm_init_sysreg(struct kvm_vcpu *);
/* MMIO helpers */ /* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len); unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
...@@ -1233,4 +1286,48 @@ static inline void kvm_hyp_reserve(void) { } ...@@ -1233,4 +1286,48 @@ static inline void kvm_hyp_reserve(void) { }
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu); void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu); bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
#define __expand_field_sign_unsigned(id, fld, val) \
((u64)SYS_FIELD_VALUE(id, fld, val))
#define __expand_field_sign_signed(id, fld, val) \
({ \
u64 __val = SYS_FIELD_VALUE(id, fld, val); \
sign_extend64(__val, id##_##fld##_WIDTH - 1); \
})
#define expand_field_sign(id, fld, val) \
(id##_##fld##_SIGNED ? \
__expand_field_sign_signed(id, fld, val) : \
__expand_field_sign_unsigned(id, fld, val))
#define get_idreg_field_unsigned(kvm, id, fld) \
({ \
u64 __val = IDREG((kvm), SYS_##id); \
FIELD_GET(id##_##fld##_MASK, __val); \
})
#define get_idreg_field_signed(kvm, id, fld) \
({ \
u64 __val = get_idreg_field_unsigned(kvm, id, fld); \
sign_extend64(__val, id##_##fld##_WIDTH - 1); \
})
#define get_idreg_field_enum(kvm, id, fld) \
get_idreg_field_unsigned(kvm, id, fld)
#define get_idreg_field(kvm, id, fld) \
(id##_##fld##_SIGNED ? \
get_idreg_field_signed(kvm, id, fld) : \
get_idreg_field_unsigned(kvm, id, fld))
#define kvm_has_feat(kvm, id, fld, limit) \
(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
#define kvm_has_feat_enum(kvm, id, fld, val) \
(get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))
#define kvm_has_feat_range(kvm, id, fld, min, max) \
(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
#endif /* __ARM64_KVM_HOST_H__ */ #endif /* __ARM64_KVM_HOST_H__ */
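As a rough illustration of what the kvm_has_feat() comparison above boils down to, here is a standalone sketch of the unsigned versus signed field handling. The register value, field offsets and the local sign_extend64() helper are stand-ins for illustration only; the kernel path goes through FIELD_GET(), SYS_FIELD_VALUE() and the generated *_SIGNED / *_WIDTH definitions.

/* Standalone sketch: a ">= limit" feature check for an unsigned field vs
 * a signed one. Values and offsets are invented for illustration. */
#include <stdint.h>
#include <stdio.h>

static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	uint64_t idreg = 0x00000000000f0100ULL;	/* hypothetical ID register */
	/* Unsigned 4-bit field at [11:8]: reads 0x1, so ">= 1 (IMP)" holds. */
	uint64_t el2 = (idreg >> 8) & 0xf;
	/* Signed 4-bit field at [19:16]: raw 0xf sign-extends to -1,
	 * i.e. "not implemented", so ">= 0 (IMP)" fails. */
	int64_t fp = sign_extend64((idreg >> 16) & 0xf, 3);

	printf("unsigned check: %d, signed check: %d\n",
	       (int)(el2 >= 1), (int)(fp >= 0));
	return 0;
}

This is why kvm_has_feat() can use a plain ">=" for both signed and unsigned fields: the signed variant sign-extends first, so "higher value means more feature" still holds.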
...@@ -60,7 +60,6 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0) ...@@ -60,7 +60,6 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
return ttbr0 & ~GENMASK_ULL(63, 48); return ttbr0 & ~GENMASK_ULL(63, 48);
} }
extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
int kvm_init_nv_sysregs(struct kvm *kvm); int kvm_init_nv_sysregs(struct kvm *kvm);
......
...@@ -39,7 +39,6 @@ menuconfig KVM ...@@ -39,7 +39,6 @@ menuconfig KVM
select HAVE_KVM_VCPU_RUN_PID_CHANGE select HAVE_KVM_VCPU_RUN_PID_CHANGE
select SCHED_INFO select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS select GUEST_PERF_EVENTS if PERF_EVENTS
select XARRAY_MULTI
help help
Support hosting virtualized guest machines. Support hosting virtualized guest machines.
...@@ -68,4 +67,15 @@ config PROTECTED_NVHE_STACKTRACE ...@@ -68,4 +67,15 @@ config PROTECTED_NVHE_STACKTRACE
If unsure, or not using protected nVHE (pKVM), say N. If unsure, or not using protected nVHE (pKVM), say N.
config KVM_ARM64_RES_BITS_PARANOIA
bool "Build-time check of RES0/RES1 bits"
depends on KVM
default n
help
Say Y here to validate that KVM's knowledge of most system
registers' RES0/RES1 bits matches what the rest of the kernel
defines. Expect the build to fail badly if you enable this.
Just say N.
endif # VIRTUALIZATION endif # VIRTUALIZATION
...@@ -190,6 +190,10 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) ...@@ -190,6 +190,10 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
kvm_sys_regs_create_debugfs(kvm);
}
/** /**
* kvm_arch_destroy_vm - destroy the VM data structure * kvm_arch_destroy_vm - destroy the VM data structure
...@@ -206,6 +210,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) ...@@ -206,6 +210,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
pkvm_destroy_hyp_vm(kvm); pkvm_destroy_hyp_vm(kvm);
kfree(kvm->arch.mpidr_data); kfree(kvm->arch.mpidr_data);
kfree(kvm->arch.sysreg_masks);
kvm_destroy_vcpus(kvm); kvm_destroy_vcpus(kvm);
kvm_unshare_hyp(kvm, kvm + 1); kvm_unshare_hyp(kvm, kvm + 1);
...@@ -674,6 +679,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) ...@@ -674,6 +679,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
return ret; return ret;
} }
/*
* This needs to happen after NV has imposed its own restrictions on
* the feature set
*/
kvm_init_sysreg(vcpu);
ret = kvm_timer_enable(vcpu); ret = kvm_timer_enable(vcpu);
if (ret) if (ret)
return ret; return ret;
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2024 - Google LLC
* Author: Marc Zyngier <maz@kernel.org>
*/
#include <asm/sysreg-defs.h>
/*
* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
*
* If any of these BUILD_BUG_ON() fails, that's because some bits that
* were reserved have gained some other meaning, and KVM needs to know
* about those.
*
* In such case, do *NOT* blindly change the assertion so that it
* passes, but also teach the rest of the code about the actual
* change.
*
* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
*/
static inline void check_res_bits(void)
{
#ifdef CONFIG_KVM_ARM64_RES_BITS_PARANOIA
BUILD_BUG_ON(OSDTRRX_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(MDCCINT_EL1_RES0 != (GENMASK_ULL(63, 31) | GENMASK_ULL(28, 0)));
BUILD_BUG_ON(MDSCR_EL1_RES0 != (GENMASK_ULL(63, 36) | GENMASK_ULL(28, 28) | GENMASK_ULL(25, 24) | GENMASK_ULL(20, 20) | GENMASK_ULL(18, 16) | GENMASK_ULL(11, 7) | GENMASK_ULL(5, 1)));
BUILD_BUG_ON(OSDTRTX_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(OSECCR_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(OSLAR_EL1_RES0 != (GENMASK_ULL(63, 1)));
BUILD_BUG_ON(ID_PFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_PFR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_DFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_AFR0_EL1_RES0 != (GENMASK_ULL(63, 16)));
BUILD_BUG_ON(ID_MMFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_MMFR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_MMFR2_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_MMFR3_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_ISAR0_EL1_RES0 != (GENMASK_ULL(63, 28)));
BUILD_BUG_ON(ID_ISAR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_ISAR2_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_ISAR3_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_ISAR4_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_ISAR5_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(23, 20)));
BUILD_BUG_ON(ID_ISAR6_EL1_RES0 != (GENMASK_ULL(63, 28)));
BUILD_BUG_ON(ID_MMFR4_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(MVFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(MVFR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(MVFR2_EL1_RES0 != (GENMASK_ULL(63, 8)));
BUILD_BUG_ON(ID_PFR2_EL1_RES0 != (GENMASK_ULL(63, 12)));
BUILD_BUG_ON(ID_DFR1_EL1_RES0 != (GENMASK_ULL(63, 8)));
BUILD_BUG_ON(ID_MMFR5_EL1_RES0 != (GENMASK_ULL(63, 8)));
BUILD_BUG_ON(ID_AA64PFR1_EL1_RES0 != (GENMASK_ULL(23, 20)));
BUILD_BUG_ON(ID_AA64PFR2_EL1_RES0 != (GENMASK_ULL(63, 36) | GENMASK_ULL(31, 12)));
BUILD_BUG_ON(ID_AA64ZFR0_EL1_RES0 != (GENMASK_ULL(63, 60) | GENMASK_ULL(51, 48) | GENMASK_ULL(39, 36) | GENMASK_ULL(31, 28) | GENMASK_ULL(15, 8)));
BUILD_BUG_ON(ID_AA64SMFR0_EL1_RES0 != (GENMASK_ULL(62, 61) | GENMASK_ULL(51, 49) | GENMASK_ULL(31, 31) | GENMASK_ULL(27, 0)));
BUILD_BUG_ON(ID_AA64FPFR0_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 2)));
BUILD_BUG_ON(ID_AA64DFR0_EL1_RES0 != (GENMASK_ULL(27, 24) | GENMASK_ULL(19, 16)));
BUILD_BUG_ON(ID_AA64DFR1_EL1_RES0 != (GENMASK_ULL(63, 0)));
BUILD_BUG_ON(ID_AA64AFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(ID_AA64AFR1_EL1_RES0 != (GENMASK_ULL(63, 0)));
BUILD_BUG_ON(ID_AA64ISAR0_EL1_RES0 != (GENMASK_ULL(3, 0)));
BUILD_BUG_ON(ID_AA64ISAR2_EL1_RES0 != (GENMASK_ULL(47, 44)));
BUILD_BUG_ON(ID_AA64ISAR3_EL1_RES0 != (GENMASK_ULL(63, 16)));
BUILD_BUG_ON(ID_AA64MMFR0_EL1_RES0 != (GENMASK_ULL(55, 48)));
BUILD_BUG_ON(ID_AA64MMFR2_EL1_RES0 != (GENMASK_ULL(47, 44)));
BUILD_BUG_ON(ID_AA64MMFR3_EL1_RES0 != (GENMASK_ULL(51, 48)));
BUILD_BUG_ON(ID_AA64MMFR4_EL1_RES0 != (GENMASK_ULL(63, 40) | GENMASK_ULL(35, 28) | GENMASK_ULL(3, 0)));
BUILD_BUG_ON(SCTLR_EL1_RES0 != (GENMASK_ULL(17, 17)));
BUILD_BUG_ON(CPACR_ELx_RES0 != (GENMASK_ULL(63, 30) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | GENMASK_ULL(19, 18) | GENMASK_ULL(15, 0)));
BUILD_BUG_ON(SMPRI_EL1_RES0 != (GENMASK_ULL(63, 4)));
BUILD_BUG_ON(ZCR_ELx_RES0 != (GENMASK_ULL(63, 9)));
BUILD_BUG_ON(SMCR_ELx_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(29, 9)));
BUILD_BUG_ON(GCSCR_ELx_RES0 != (GENMASK_ULL(63, 10) | GENMASK_ULL(7, 7) | GENMASK_ULL(4, 1)));
BUILD_BUG_ON(GCSPR_ELx_RES0 != (GENMASK_ULL(2, 0)));
BUILD_BUG_ON(GCSCRE0_EL1_RES0 != (GENMASK_ULL(63, 11) | GENMASK_ULL(7, 6) | GENMASK_ULL(4, 1)));
BUILD_BUG_ON(ALLINT_RES0 != (GENMASK_ULL(63, 14) | GENMASK_ULL(12, 0)));
BUILD_BUG_ON(PMSCR_EL1_RES0 != (GENMASK_ULL(63, 8) | GENMASK_ULL(2, 2)));
BUILD_BUG_ON(PMSICR_EL1_RES0 != (GENMASK_ULL(55, 32)));
BUILD_BUG_ON(PMSIRR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(7, 1)));
BUILD_BUG_ON(PMSFCR_EL1_RES0 != (GENMASK_ULL(63, 19) | GENMASK_ULL(15, 4)));
BUILD_BUG_ON(PMSLATFR_EL1_RES0 != (GENMASK_ULL(63, 16)));
BUILD_BUG_ON(PMSIDR_EL1_RES0 != (GENMASK_ULL(63, 25) | GENMASK_ULL(7, 7)));
BUILD_BUG_ON(PMBLIMITR_EL1_RES0 != (GENMASK_ULL(11, 6) | GENMASK_ULL(4, 3)));
BUILD_BUG_ON(PMBSR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(25, 20)));
BUILD_BUG_ON(PMBIDR_EL1_RES0 != (GENMASK_ULL(63, 12) | GENMASK_ULL(7, 6)));
BUILD_BUG_ON(CONTEXTIDR_ELx_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(CCSIDR_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(CLIDR_EL1_RES0 != (GENMASK_ULL(63, 47)));
BUILD_BUG_ON(CCSIDR2_EL1_RES0 != (GENMASK_ULL(63, 24)));
BUILD_BUG_ON(GMID_EL1_RES0 != (GENMASK_ULL(63, 4)));
BUILD_BUG_ON(SMIDR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(14, 12)));
BUILD_BUG_ON(CSSELR_EL1_RES0 != (GENMASK_ULL(63, 5)));
BUILD_BUG_ON(CTR_EL0_RES0 != (GENMASK_ULL(63, 38) | GENMASK_ULL(30, 30) | GENMASK_ULL(13, 4)));
BUILD_BUG_ON(CTR_EL0_RES1 != (GENMASK_ULL(31, 31)));
BUILD_BUG_ON(DCZID_EL0_RES0 != (GENMASK_ULL(63, 5)));
BUILD_BUG_ON(SVCR_RES0 != (GENMASK_ULL(63, 2)));
BUILD_BUG_ON(FPMR_RES0 != (GENMASK_ULL(63, 38) | GENMASK_ULL(23, 23) | GENMASK_ULL(13, 9)));
BUILD_BUG_ON(HFGxTR_EL2_RES0 != (GENMASK_ULL(51, 51)));
BUILD_BUG_ON(HFGITR_EL2_RES0 != (GENMASK_ULL(63, 63) | GENMASK_ULL(61, 61)));
BUILD_BUG_ON(HDFGRTR_EL2_RES0 != (GENMASK_ULL(49, 49) | GENMASK_ULL(42, 42) | GENMASK_ULL(39, 38) | GENMASK_ULL(21, 20) | GENMASK_ULL(8, 8)));
BUILD_BUG_ON(HDFGWTR_EL2_RES0 != (GENMASK_ULL(63, 63) | GENMASK_ULL(59, 58) | GENMASK_ULL(51, 51) | GENMASK_ULL(47, 47) | GENMASK_ULL(43, 43) | GENMASK_ULL(40, 38) | GENMASK_ULL(34, 34) | GENMASK_ULL(30, 30) | GENMASK_ULL(22, 22) | GENMASK_ULL(9, 9) | GENMASK_ULL(6, 6)));
BUILD_BUG_ON(HAFGRTR_EL2_RES0 != (GENMASK_ULL(63, 50) | GENMASK_ULL(16, 5)));
BUILD_BUG_ON(HCRX_EL2_RES0 != (GENMASK_ULL(63, 25) | GENMASK_ULL(13, 12)));
BUILD_BUG_ON(DACR32_EL2_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(PMSCR_EL2_RES0 != (GENMASK_ULL(63, 8) | GENMASK_ULL(2, 2)));
BUILD_BUG_ON(TCR2_EL1x_RES0 != (GENMASK_ULL(63, 16) | GENMASK_ULL(13, 12) | GENMASK_ULL(9, 6)));
BUILD_BUG_ON(TCR2_EL2_RES0 != (GENMASK_ULL(63, 16)));
BUILD_BUG_ON(LORSA_EL1_RES0 != (GENMASK_ULL(63, 52) | GENMASK_ULL(15, 1)));
BUILD_BUG_ON(LOREA_EL1_RES0 != (GENMASK_ULL(63, 52) | GENMASK_ULL(15, 0)));
BUILD_BUG_ON(LORN_EL1_RES0 != (GENMASK_ULL(63, 8)));
BUILD_BUG_ON(LORC_EL1_RES0 != (GENMASK_ULL(63, 10) | GENMASK_ULL(1, 1)));
BUILD_BUG_ON(LORID_EL1_RES0 != (GENMASK_ULL(63, 24) | GENMASK_ULL(15, 8)));
BUILD_BUG_ON(ISR_EL1_RES0 != (GENMASK_ULL(63, 11) | GENMASK_ULL(5, 0)));
BUILD_BUG_ON(ICC_NMIAR1_EL1_RES0 != (GENMASK_ULL(63, 24)));
BUILD_BUG_ON(TRBLIMITR_EL1_RES0 != (GENMASK_ULL(11, 7)));
BUILD_BUG_ON(TRBBASER_EL1_RES0 != (GENMASK_ULL(11, 0)));
BUILD_BUG_ON(TRBSR_EL1_RES0 != (GENMASK_ULL(63, 56) | GENMASK_ULL(25, 24) | GENMASK_ULL(19, 19) | GENMASK_ULL(16, 16)));
BUILD_BUG_ON(TRBMAR_EL1_RES0 != (GENMASK_ULL(63, 12)));
BUILD_BUG_ON(TRBTRG_EL1_RES0 != (GENMASK_ULL(63, 32)));
BUILD_BUG_ON(TRBIDR_EL1_RES0 != (GENMASK_ULL(63, 12) | GENMASK_ULL(7, 6)));
#endif
}
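For readers decoding the assertions above: GENMASK_ULL(h, l) is the kernel's "bits h down to l" mask. Below is a small userspace approximation, using the CTR_EL0 RES0 layout asserted above as the worked example; the macro here is a simplified stand-in, not the kernel definition.

/* Userspace sketch of GENMASK_ULL() composing the CTR_EL0 RES0 mask
 * checked above. Illustration only. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* CTR_EL0_RES0 as asserted above: bits [63:38], [30] and [13:4]. */
	uint64_t res0 = GENMASK_ULL(63, 38) | GENMASK_ULL(30, 30) |
			GENMASK_ULL(13, 4);

	printf("CTR_EL0 RES0 mask: %#018llx\n", (unsigned long long)res0);
	return 0;
}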
...@@ -427,12 +427,14 @@ static const complex_condition_check ccc[] = { ...@@ -427,12 +427,14 @@ static const complex_condition_check ccc[] = {
* [19:14] bit number in the FGT register (6 bits) * [19:14] bit number in the FGT register (6 bits)
* [20] trap polarity (1 bit) * [20] trap polarity (1 bit)
* [25:21] FG filter (5 bits) * [25:21] FG filter (5 bits)
* [62:26] Unused (37 bits) * [35:26] Main SysReg table index (10 bits)
* [62:36] Unused (27 bits)
* [63] RES0 - Must be zero, as lost on insertion in the xarray * [63] RES0 - Must be zero, as lost on insertion in the xarray
*/ */
#define TC_CGT_BITS 10 #define TC_CGT_BITS 10
#define TC_FGT_BITS 4 #define TC_FGT_BITS 4
#define TC_FGF_BITS 5 #define TC_FGF_BITS 5
#define TC_SRI_BITS 10
union trap_config { union trap_config {
u64 val; u64 val;
...@@ -442,7 +444,8 @@ union trap_config { ...@@ -442,7 +444,8 @@ union trap_config {
unsigned long bit:6; /* Bit number */ unsigned long bit:6; /* Bit number */
unsigned long pol:1; /* Polarity */ unsigned long pol:1; /* Polarity */
unsigned long fgf:TC_FGF_BITS; /* Fine Grained Filter */ unsigned long fgf:TC_FGF_BITS; /* Fine Grained Filter */
unsigned long unused:37; /* Unused, should be zero */ unsigned long sri:TC_SRI_BITS; /* SysReg Index */
unsigned long unused:27; /* Unused, should be zero */
unsigned long mbz:1; /* Must Be Zero */ unsigned long mbz:1; /* Must Be Zero */
}; };
}; };
...@@ -1006,18 +1009,6 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { ...@@ -1006,18 +1009,6 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
static DEFINE_XARRAY(sr_forward_xa); static DEFINE_XARRAY(sr_forward_xa);
enum fgt_group_id {
__NO_FGT_GROUP__,
HFGxTR_GROUP,
HDFGRTR_GROUP,
HDFGWTR_GROUP,
HFGITR_GROUP,
HAFGRTR_GROUP,
/* Must be last */
__NR_FGT_GROUP_IDS__
};
enum fg_filter_id { enum fg_filter_id {
__NO_FGF__, __NO_FGF__,
HCRX_FGTnXS, HCRX_FGTnXS,
...@@ -1757,6 +1748,28 @@ static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc, ...@@ -1757,6 +1748,28 @@ static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc,
err); err);
} }
static u32 encoding_next(u32 encoding)
{
u8 op0, op1, crn, crm, op2;
op0 = sys_reg_Op0(encoding);
op1 = sys_reg_Op1(encoding);
crn = sys_reg_CRn(encoding);
crm = sys_reg_CRm(encoding);
op2 = sys_reg_Op2(encoding);
if (op2 < Op2_mask)
return sys_reg(op0, op1, crn, crm, op2 + 1);
if (crm < CRm_mask)
return sys_reg(op0, op1, crn, crm + 1, 0);
if (crn < CRn_mask)
return sys_reg(op0, op1, crn + 1, 0, 0);
if (op1 < Op1_mask)
return sys_reg(op0, op1 + 1, 0, 0, 0);
return sys_reg(op0 + 1, 0, 0, 0, 0);
}
int __init populate_nv_trap_config(void) int __init populate_nv_trap_config(void)
{ {
int ret = 0; int ret = 0;
...@@ -1775,25 +1788,20 @@ int __init populate_nv_trap_config(void) ...@@ -1775,25 +1788,20 @@ int __init populate_nv_trap_config(void)
ret = -EINVAL; ret = -EINVAL;
} }
if (cgt->encoding != cgt->end) { for (u32 enc = cgt->encoding; enc <= cgt->end; enc = encoding_next(enc)) {
prev = xa_store_range(&sr_forward_xa, prev = xa_store(&sr_forward_xa, enc,
cgt->encoding, cgt->end,
xa_mk_value(cgt->tc.val),
GFP_KERNEL);
} else {
prev = xa_store(&sr_forward_xa, cgt->encoding,
xa_mk_value(cgt->tc.val), GFP_KERNEL); xa_mk_value(cgt->tc.val), GFP_KERNEL);
if (prev && !xa_is_err(prev)) { if (prev && !xa_is_err(prev)) {
ret = -EINVAL; ret = -EINVAL;
print_nv_trap_error(cgt, "Duplicate CGT", ret); print_nv_trap_error(cgt, "Duplicate CGT", ret);
} }
}
if (xa_is_err(prev)) { if (xa_is_err(prev)) {
ret = xa_err(prev); ret = xa_err(prev);
print_nv_trap_error(cgt, "Failed CGT insertion", ret); print_nv_trap_error(cgt, "Failed CGT insertion", ret);
} }
} }
}
kvm_info("nv: %ld coarse grained trap handlers\n", kvm_info("nv: %ld coarse grained trap handlers\n",
ARRAY_SIZE(encoding_to_cgt)); ARRAY_SIZE(encoding_to_cgt));
...@@ -1804,6 +1812,7 @@ int __init populate_nv_trap_config(void) ...@@ -1804,6 +1812,7 @@ int __init populate_nv_trap_config(void)
for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) { for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) {
const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i]; const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i];
union trap_config tc; union trap_config tc;
void *prev;
if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) { if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) {
ret = -EINVAL; ret = -EINVAL;
...@@ -1818,8 +1827,13 @@ int __init populate_nv_trap_config(void) ...@@ -1818,8 +1827,13 @@ int __init populate_nv_trap_config(void)
} }
tc.val |= fgt->tc.val; tc.val |= fgt->tc.val;
xa_store(&sr_forward_xa, fgt->encoding, prev = xa_store(&sr_forward_xa, fgt->encoding,
xa_mk_value(tc.val), GFP_KERNEL); xa_mk_value(tc.val), GFP_KERNEL);
if (xa_is_err(prev)) {
ret = xa_err(prev);
print_nv_trap_error(fgt, "Failed FGT insertion", ret);
}
} }
kvm_info("nv: %ld fine grained trap handlers\n", kvm_info("nv: %ld fine grained trap handlers\n",
...@@ -1845,6 +1859,38 @@ int __init populate_nv_trap_config(void) ...@@ -1845,6 +1859,38 @@ int __init populate_nv_trap_config(void)
return ret; return ret;
} }
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
unsigned int idx)
{
union trap_config tc;
u32 encoding;
void *ret;
/*
* 0 is a valid value for the index, but not for the storage.
* We'll store (idx+1), so check against an offset'd limit.
*/
if (idx >= (BIT(TC_SRI_BITS) - 1)) {
kvm_err("sysreg %s (%d) out of range\n", sr->name, idx);
return -EINVAL;
}
encoding = sys_reg(sr->Op0, sr->Op1, sr->CRn, sr->CRm, sr->Op2);
tc = get_trap_config(encoding);
if (tc.sri) {
kvm_err("sysreg %s (%d) duplicate entry (%d)\n",
sr->name, idx - 1, tc.sri);
return -EINVAL;
}
tc.sri = idx + 1;
ret = xa_store(&sr_forward_xa, encoding,
xa_mk_value(tc.val), GFP_KERNEL);
return xa_err(ret);
}
static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu, static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu,
const struct trap_bits *tb) const struct trap_bits *tb)
{ {
...@@ -1892,20 +1938,64 @@ static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu, ...@@ -1892,20 +1938,64 @@ static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu,
return __compute_trap_behaviour(vcpu, tc.cgt, b); return __compute_trap_behaviour(vcpu, tc.cgt, b);
} }
static bool check_fgt_bit(u64 val, const union trap_config tc) static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr)
{ {
return ((val >> tc.bit) & 1) == tc.pol; struct kvm_sysreg_masks *masks;
/* Only handle the VNCR-backed regs for now */
if (sr < __VNCR_START__)
return 0;
masks = kvm->arch.sysreg_masks;
return masks->mask[sr - __VNCR_START__].res0;
} }
#define sanitised_sys_reg(vcpu, reg) \ static bool check_fgt_bit(struct kvm *kvm, bool is_read,
({ \ u64 val, const union trap_config tc)
u64 __val; \ {
__val = __vcpu_sys_reg(vcpu, reg); \ enum vcpu_sysreg sr;
__val &= ~__ ## reg ## _RES0; \
(__val); \ if (tc.pol)
}) return (val & BIT(tc.bit));
/*
* FGTs with negative polarities are an absolute nightmare, as
* we need to evaluate the bit in the light of the feature
* that defines it. WTF were they thinking?
*
* So let's check if the bit has been earmarked as RES0, as
* this indicates an unimplemented feature.
*/
if (val & BIT(tc.bit))
return false;
bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) switch ((enum fgt_group_id)tc.fgt) {
case HFGxTR_GROUP:
sr = is_read ? HFGRTR_EL2 : HFGWTR_EL2;
break;
case HDFGRTR_GROUP:
sr = is_read ? HDFGRTR_EL2 : HDFGWTR_EL2;
break;
case HAFGRTR_GROUP:
sr = HAFGRTR_EL2;
break;
case HFGITR_GROUP:
sr = HFGITR_EL2;
break;
default:
WARN_ONCE(1, "Unhandled FGT group");
return false;
}
return !(kvm_get_sysreg_res0(kvm, sr) & BIT(tc.bit));
}
bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
{ {
union trap_config tc; union trap_config tc;
enum trap_behaviour b; enum trap_behaviour b;
...@@ -1913,9 +2003,6 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) ...@@ -1913,9 +2003,6 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
u32 sysreg; u32 sysreg;
u64 esr, val; u64 esr, val;
if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
return false;
esr = kvm_vcpu_get_esr(vcpu); esr = kvm_vcpu_get_esr(vcpu);
sysreg = esr_sys64_to_sysreg(esr); sysreg = esr_sys64_to_sysreg(esr);
is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ; is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
...@@ -1926,13 +2013,27 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) ...@@ -1926,13 +2013,27 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
* A value of 0 for the whole entry means that we know nothing * A value of 0 for the whole entry means that we know nothing
* for this sysreg, and that it cannot be re-injected into the * for this sysreg, and that it cannot be re-injected into the
* nested hypervisor. In this situation, let's cut it short. * nested hypervisor. In this situation, let's cut it short.
*
* Note that ultimately, we could also make use of the xarray
* to store the index of the sysreg in the local descriptor
* array, avoiding another search... Hint, hint...
*/ */
if (!tc.val) if (!tc.val)
return false; goto local;
/*
* If a sysreg can be trapped using a FGT, first check whether we
* trap for the purpose of forbidding the feature. In that case,
* inject an UNDEF.
*/
if (tc.fgt != __NO_FGT_GROUP__ &&
(vcpu->kvm->arch.fgu[tc.fgt] & BIT(tc.bit))) {
kvm_inject_undefined(vcpu);
return true;
}
/*
* If we're not nesting, immediately return to the caller, with the
* sysreg index, should we have it.
*/
if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
goto local;
switch ((enum fgt_group_id)tc.fgt) { switch ((enum fgt_group_id)tc.fgt) {
case __NO_FGT_GROUP__: case __NO_FGT_GROUP__:
...@@ -1940,25 +2041,24 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) ...@@ -1940,25 +2041,24 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
case HFGxTR_GROUP: case HFGxTR_GROUP:
if (is_read) if (is_read)
val = sanitised_sys_reg(vcpu, HFGRTR_EL2); val = __vcpu_sys_reg(vcpu, HFGRTR_EL2);
else else
val = sanitised_sys_reg(vcpu, HFGWTR_EL2); val = __vcpu_sys_reg(vcpu, HFGWTR_EL2);
break; break;
case HDFGRTR_GROUP: case HDFGRTR_GROUP:
case HDFGWTR_GROUP:
if (is_read) if (is_read)
val = sanitised_sys_reg(vcpu, HDFGRTR_EL2); val = __vcpu_sys_reg(vcpu, HDFGRTR_EL2);
else else
val = sanitised_sys_reg(vcpu, HDFGWTR_EL2); val = __vcpu_sys_reg(vcpu, HDFGWTR_EL2);
break; break;
case HAFGRTR_GROUP: case HAFGRTR_GROUP:
val = sanitised_sys_reg(vcpu, HAFGRTR_EL2); val = __vcpu_sys_reg(vcpu, HAFGRTR_EL2);
break; break;
case HFGITR_GROUP: case HFGITR_GROUP:
val = sanitised_sys_reg(vcpu, HFGITR_EL2); val = __vcpu_sys_reg(vcpu, HFGITR_EL2);
switch (tc.fgf) { switch (tc.fgf) {
u64 tmp; u64 tmp;
...@@ -1966,7 +2066,7 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) ...@@ -1966,7 +2066,7 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
break; break;
case HCRX_FGTnXS: case HCRX_FGTnXS:
tmp = sanitised_sys_reg(vcpu, HCRX_EL2); tmp = __vcpu_sys_reg(vcpu, HCRX_EL2);
if (tmp & HCRX_EL2_FGTnXS) if (tmp & HCRX_EL2_FGTnXS)
tc.fgt = __NO_FGT_GROUP__; tc.fgt = __NO_FGT_GROUP__;
} }
...@@ -1975,10 +2075,11 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) ...@@ -1975,10 +2075,11 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
case __NR_FGT_GROUP_IDS__: case __NR_FGT_GROUP_IDS__:
/* Something is really wrong, bail out */ /* Something is really wrong, bail out */
WARN_ONCE(1, "__NR_FGT_GROUP_IDS__"); WARN_ONCE(1, "__NR_FGT_GROUP_IDS__");
return false; goto local;
} }
if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(val, tc)) if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu->kvm, is_read,
val, tc))
goto inject; goto inject;
b = compute_trap_behaviour(vcpu, tc); b = compute_trap_behaviour(vcpu, tc);
...@@ -1987,6 +2088,26 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu) ...@@ -1987,6 +2088,26 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
((b & BEHAVE_FORWARD_WRITE) && !is_read)) ((b & BEHAVE_FORWARD_WRITE) && !is_read))
goto inject; goto inject;
local:
if (!tc.sri) {
struct sys_reg_params params;
params = esr_sys64_to_params(esr);
/*
* Check for the IMPDEF range, as per DDI0487 J.a,
* D18.3.2 Reserved encodings for IMPLEMENTATION
* DEFINED registers.
*/
if (!(params.Op0 == 3 && (params.CRn & 0b1011) == 0b1011))
print_sys_reg_msg(&params,
"Unsupported guest access at: %lx\n",
*vcpu_pc(vcpu));
kvm_inject_undefined(vcpu);
return true;
}
*sr_index = tc.sri - 1;
return false; return false;
inject: inject:
......
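To make the (idx + 1) biasing in populate_sysreg_config() concrete, here is a standalone, cut-down copy of the trap_config layout from this file. Field widths mirror the hunk above, but this is an illustration, not the kernel code path (which additionally packs the value through xa_mk_value()).

/* Cut-down trap_config: an all-zero entry must keep meaning "we know
 * nothing about this encoding", so the SysReg index is stored biased
 * by one. Illustration only. */
#include <stdint.h>
#include <stdio.h>

union trap_config {
	uint64_t val;
	struct {
		uint64_t cgt:10;	/* Coarse Grained Trap id */
		uint64_t fgt:4;		/* Fine Grained Trap id */
		uint64_t bit:6;		/* Bit number in the FGT register */
		uint64_t pol:1;		/* Trap polarity */
		uint64_t fgf:5;		/* Fine Grained Filter */
		uint64_t sri:10;	/* SysReg Index, biased by one */
		uint64_t unused:27;
		uint64_t mbz:1;		/* Must Be Zero */
	};
};

int main(void)
{
	union trap_config tc = { .val = 0 };

	tc.sri = 42 + 1;		/* descriptor table index 42 */
	printf("sri field %u decodes to table index %u; sri == 0 means 'none'\n",
	       (unsigned)tc.sri, (unsigned)tc.sri - 1);
	return 0;
}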
...@@ -79,14 +79,48 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) ...@@ -79,14 +79,48 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
clr |= ~hfg & __ ## reg ## _nMASK; \ clr |= ~hfg & __ ## reg ## _nMASK; \
} while(0) } while(0)
#define update_fgt_traps_cs(vcpu, reg, clr, set) \ #define reg_to_fgt_group_id(reg) \
({ \
enum fgt_group_id id; \
switch(reg) { \
case HFGRTR_EL2: \
case HFGWTR_EL2: \
id = HFGxTR_GROUP; \
break; \
case HFGITR_EL2: \
id = HFGITR_GROUP; \
break; \
case HDFGRTR_EL2: \
case HDFGWTR_EL2: \
id = HDFGRTR_GROUP; \
break; \
case HAFGRTR_EL2: \
id = HAFGRTR_GROUP; \
break; \
default: \
BUILD_BUG_ON(1); \
} \
\
id; \
})
#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
do { \
u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
set |= hfg & __ ## reg ## _MASK; \
clr |= hfg & __ ## reg ## _nMASK; \
} while(0)
#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
do { \ do { \
struct kvm_cpu_context *hctxt = \
&this_cpu_ptr(&kvm_host_data)->host_ctxt; \
u64 c = 0, s = 0; \ u64 c = 0, s = 0; \
\ \
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \ ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \
compute_clr_set(vcpu, reg, c, s); \ compute_clr_set(vcpu, reg, c, s); \
\
compute_undef_clr_set(vcpu, kvm, reg, c, s); \
\
s |= set; \ s |= set; \
c |= clr; \ c |= clr; \
if (c || s) { \ if (c || s) { \
...@@ -97,8 +131,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) ...@@ -97,8 +131,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
} \ } \
} while(0) } while(0)
#define update_fgt_traps(vcpu, reg) \ #define update_fgt_traps(hctxt, vcpu, kvm, reg) \
update_fgt_traps_cs(vcpu, reg, 0, 0) update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
/* /*
* Validate the fine grain trap masks. * Validate the fine grain trap masks.
...@@ -122,8 +156,7 @@ static inline bool cpu_has_amu(void) ...@@ -122,8 +156,7 @@ static inline bool cpu_has_amu(void)
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{ {
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp; struct kvm *kvm = kern_hyp_va(vcpu->kvm);
u64 r_val, w_val;
CHECK_FGT_MASKS(HFGRTR_EL2); CHECK_FGT_MASKS(HFGRTR_EL2);
CHECK_FGT_MASKS(HFGWTR_EL2); CHECK_FGT_MASKS(HFGWTR_EL2);
...@@ -136,72 +169,45 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) ...@@ -136,72 +169,45 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
if (!cpus_have_final_cap(ARM64_HAS_FGT)) if (!cpus_have_final_cap(ARM64_HAS_FGT))
return; return;
ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2); update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2); update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
if (cpus_have_final_cap(ARM64_SME)) { HFGxTR_EL2_TCR_EL1_MASK : 0);
tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK; update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
r_clr |= tmp; update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
w_clr |= tmp;
}
/*
* Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD.
*/
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
w_set |= HFGxTR_EL2_TCR_EL1_MASK;
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set);
compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
}
/* The default to trap everything not handled or supported in KVM. */
tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 |
HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1;
r_val = __HFGRTR_EL2_nMASK & ~tmp;
r_val |= r_set;
r_val &= ~r_clr;
w_val = __HFGWTR_EL2_nMASK & ~tmp;
w_val |= w_set;
w_val &= ~w_clr;
write_sysreg_s(r_val, SYS_HFGRTR_EL2);
write_sysreg_s(w_val, SYS_HFGWTR_EL2);
if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
return;
update_fgt_traps(vcpu, HFGITR_EL2);
update_fgt_traps(vcpu, HDFGRTR_EL2);
update_fgt_traps(vcpu, HDFGWTR_EL2);
if (cpu_has_amu()) if (cpu_has_amu())
update_fgt_traps(vcpu, HAFGRTR_EL2); update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
} }
#define __deactivate_fgt(htcxt, vcpu, kvm, reg) \
do { \
if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) || \
kvm->arch.fgu[reg_to_fgt_group_id(reg)]) \
write_sysreg_s(ctxt_sys_reg(hctxt, reg), \
SYS_ ## reg); \
} while(0)
static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{ {
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
if (!cpus_have_final_cap(ARM64_HAS_FGT)) if (!cpus_have_final_cap(ARM64_HAS_FGT))
return; return;
write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2); __deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2);
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2); write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
else
if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu)) __deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2);
return; __deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2);
__deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2);
write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2); __deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2);
write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
if (cpu_has_amu()) if (cpu_has_amu())
write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2); __deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
} }
static inline void __activate_traps_common(struct kvm_vcpu *vcpu) static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
...@@ -230,7 +236,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) ...@@ -230,7 +236,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
if (cpus_have_final_cap(ARM64_HAS_HCX)) { if (cpus_have_final_cap(ARM64_HAS_HCX)) {
u64 hcrx = HCRX_GUEST_FLAGS; u64 hcrx = vcpu->arch.hcrx_el2;
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
u64 clr = 0, set = 0; u64 clr = 0, set = 0;
......
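A minimal sketch of the split performed by compute_undef_clr_set() above: bits requested in the per-VM fgu[] word end up either setting or clearing the corresponding FGT trap bit, depending on that bit's polarity. The two masks here are invented; in the kernel they are the generated __<reg>_MASK / __<reg>_nMASK values.

/* Illustration only: splitting one FGU word into FGT "set" and "clear"
 * updates according to per-bit trap polarity. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fgu      = 0x6;	/* want bits 1 and 2 to UNDEF */
	uint64_t pos_mask = 0x2;	/* bit 1 traps when set */
	uint64_t neg_mask = 0x4;	/* bit 2 traps when clear */
	uint64_t set = 0, clr = 0;

	set |= fgu & pos_mask;		/* positive-polarity bits get set */
	clr |= fgu & neg_mask;		/* negative-polarity bits get cleared */

	printf("set=%#llx clr=%#llx\n",
	       (unsigned long long)set, (unsigned long long)clr);
	return 0;
}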
...@@ -27,16 +27,34 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt) ...@@ -27,16 +27,34 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0); ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
} }
static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt) static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
{ {
struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu; struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
if (!vcpu) if (!vcpu)
vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt); vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
return vcpu;
}
static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
{
struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
return kvm_has_mte(kern_hyp_va(vcpu->kvm)); return kvm_has_mte(kern_hyp_va(vcpu->kvm));
} }
static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
{
struct kvm_vcpu *vcpu;
if (!cpus_have_final_cap(ARM64_HAS_S1PIE))
return false;
vcpu = ctxt_to_vcpu(ctxt);
return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP);
}
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{ {
ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR); ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
...@@ -55,7 +73,7 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) ...@@ -55,7 +73,7 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR); ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR); ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL); ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
if (cpus_have_final_cap(ARM64_HAS_S1PIE)) { if (ctxt_has_s1pie(ctxt)) {
ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR); ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0); ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
} }
...@@ -131,7 +149,7 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) ...@@ -131,7 +149,7 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR); write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR); write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL); write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
if (cpus_have_final_cap(ARM64_HAS_S1PIE)) { if (ctxt_has_s1pie(ctxt)) {
write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR); write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0); write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
} }
......
...@@ -163,15 +163,280 @@ static u64 limit_nv_id_reg(u32 id, u64 val) ...@@ -163,15 +163,280 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
return val; return val;
} }
u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
{
u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
struct kvm_sysreg_masks *masks;
masks = vcpu->kvm->arch.sysreg_masks;
if (masks) {
sr -= __VNCR_START__;
v &= ~masks->mask[sr].res0;
v |= masks->mask[sr].res1;
}
return v;
}
static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
{
int i = sr - __VNCR_START__;
kvm->arch.sysreg_masks->mask[i].res0 = res0;
kvm->arch.sysreg_masks->mask[i].res1 = res1;
}
int kvm_init_nv_sysregs(struct kvm *kvm) int kvm_init_nv_sysregs(struct kvm *kvm)
{ {
u64 res0, res1;
int ret = 0;
mutex_lock(&kvm->arch.config_lock); mutex_lock(&kvm->arch.config_lock);
if (kvm->arch.sysreg_masks)
goto out;
kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
GFP_KERNEL);
if (!kvm->arch.sysreg_masks) {
ret = -ENOMEM;
goto out;
}
for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++) for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i), kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
kvm->arch.id_regs[i]); kvm->arch.id_regs[i]);
/* VTTBR_EL2 */
res0 = res1 = 0;
if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
res0 |= GENMASK(63, 56);
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
res0 |= VTTBR_CNP_BIT;
set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
/* VTCR_EL2 */
res0 = GENMASK(63, 32) | GENMASK(30, 20);
res1 = BIT(31);
set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
/* VMPIDR_EL2 */
res0 = GENMASK(63, 40) | GENMASK(30, 24);
res1 = BIT(31);
set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
/* HCR_EL2 */
res0 = BIT(48);
res1 = HCR_RW;
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP))
res0 |= GENMASK(63, 59);
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2))
res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA);
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS))
res0 |= (HCR_TTLBIS | HCR_TTLBOS);
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
!kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
res0 |= HCR_ENSCXT;
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP))
res0 |= (HCR_TOCU | HCR_TICAB | HCR_TID4);
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
res0 |= HCR_AMVOFFEN;
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1))
res0 |= HCR_FIEN;
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
res0 |= HCR_FWB;
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
res0 |= HCR_NV2;
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
res0 |= (HCR_API | HCR_APK);
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
res0 |= BIT(39);
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
res0 |= (HCR_TEA | HCR_TERR);
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
res0 |= HCR_TLOR;
if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
res1 |= HCR_E2H;
set_sysreg_masks(kvm, HCR_EL2, res0, res1);
/* HCRX_EL2 */
res0 = HCRX_EL2_RES0;
res1 = HCRX_EL2_RES1;
if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP))
res0 |= HCRX_EL2_PACMEn;
if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP))
res0 |= HCRX_EL2_EnFPM;
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
res0 |= HCRX_EL2_GCSEn;
if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP))
res0 |= HCRX_EL2_EnIDCP128;
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, DEV_ASYNC))
res0 |= (HCRX_EL2_EnSDERR | HCRX_EL2_EnSNERR);
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP))
res0 |= HCRX_EL2_TMEA;
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
res0 |= HCRX_EL2_D128En;
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
res0 |= HCRX_EL2_PTTWI;
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP))
res0 |= HCRX_EL2_SCTLR2En;
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
res0 |= HCRX_EL2_TCR2En;
if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP))
res0 |= HCRX_EL2_CMOW;
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP))
res0 |= (HCRX_EL2_VFNMI | HCRX_EL2_VINMI | HCRX_EL2_TALLINT);
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ||
!(read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS))
res0 |= HCRX_EL2_SMPME;
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
res0 |= (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS);
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
res0 |= HCRX_EL2_EnASR;
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
res0 |= HCRX_EL2_EnALS;
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
res0 |= HCRX_EL2_EnAS0;
set_sysreg_masks(kvm, HCRX_EL2, res0, res1);
/* HFG[RW]TR_EL2 */
res0 = res1 = 0;
if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey |
HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey |
HFGxTR_EL2_APIBKey);
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
res0 |= (HFGxTR_EL2_LORC_EL1 | HFGxTR_EL2_LOREA_EL1 |
HFGxTR_EL2_LORID_EL1 | HFGxTR_EL2_LORN_EL1 |
HFGxTR_EL2_LORSA_EL1);
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
!kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
res0 |= (HFGxTR_EL2_SCXTNUM_EL1 | HFGxTR_EL2_SCXTNUM_EL0);
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP))
res0 |= HFGxTR_EL2_ICC_IGRPENn_EL1;
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
res0 |= (HFGxTR_EL2_ERRIDR_EL1 | HFGxTR_EL2_ERRSELR_EL1 |
HFGxTR_EL2_ERXFR_EL1 | HFGxTR_EL2_ERXCTLR_EL1 |
HFGxTR_EL2_ERXSTATUS_EL1 | HFGxTR_EL2_ERXMISCn_EL1 |
HFGxTR_EL2_ERXPFGF_EL1 | HFGxTR_EL2_ERXPFGCTL_EL1 |
HFGxTR_EL2_ERXPFGCDN_EL1 | HFGxTR_EL2_ERXADDR_EL1);
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
res0 |= HFGxTR_EL2_nACCDATA_EL1;
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
res0 |= (HFGxTR_EL2_nGCS_EL0 | HFGxTR_EL2_nGCS_EL1);
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
res0 |= (HFGxTR_EL2_nSMPRI_EL1 | HFGxTR_EL2_nTPIDR2_EL0);
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
res0 |= HFGxTR_EL2_nRCWMASK_EL1;
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
res0 |= (HFGxTR_EL2_nPIRE0_EL1 | HFGxTR_EL2_nPIR_EL1);
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
res0 |= (HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nPOR_EL1);
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
res0 |= HFGxTR_EL2_nS2POR_EL1;
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP))
res0 |= (HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nAMAIR2_EL1);
set_sysreg_masks(kvm, HFGRTR_EL2, res0 | __HFGRTR_EL2_RES0, res1);
set_sysreg_masks(kvm, HFGWTR_EL2, res0 | __HFGWTR_EL2_RES0, res1);
/* HDFG[RW]TR_EL2 */
res0 = res1 = 0;
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
res0 |= HDFGRTR_EL2_OSDLR_EL1;
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
res0 |= (HDFGRTR_EL2_PMEVCNTRn_EL0 | HDFGRTR_EL2_PMEVTYPERn_EL0 |
HDFGRTR_EL2_PMCCFILTR_EL0 | HDFGRTR_EL2_PMCCNTR_EL0 |
HDFGRTR_EL2_PMCNTEN | HDFGRTR_EL2_PMINTEN |
HDFGRTR_EL2_PMOVS | HDFGRTR_EL2_PMSELR_EL0 |
HDFGRTR_EL2_PMMIR_EL1 | HDFGRTR_EL2_PMUSERENR_EL0 |
HDFGRTR_EL2_PMCEIDn_EL0);
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP))
res0 |= (HDFGRTR_EL2_PMBLIMITR_EL1 | HDFGRTR_EL2_PMBPTR_EL1 |
HDFGRTR_EL2_PMBSR_EL1 | HDFGRTR_EL2_PMSCR_EL1 |
HDFGRTR_EL2_PMSEVFR_EL1 | HDFGRTR_EL2_PMSFCR_EL1 |
HDFGRTR_EL2_PMSICR_EL1 | HDFGRTR_EL2_PMSIDR_EL1 |
HDFGRTR_EL2_PMSIRR_EL1 | HDFGRTR_EL2_PMSLATFR_EL1 |
HDFGRTR_EL2_PMBIDR_EL1);
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
res0 |= (HDFGRTR_EL2_TRC | HDFGRTR_EL2_TRCAUTHSTATUS |
HDFGRTR_EL2_TRCAUXCTLR | HDFGRTR_EL2_TRCCLAIM |
HDFGRTR_EL2_TRCCNTVRn | HDFGRTR_EL2_TRCID |
HDFGRTR_EL2_TRCIMSPECn | HDFGRTR_EL2_TRCOSLSR |
HDFGRTR_EL2_TRCPRGCTLR | HDFGRTR_EL2_TRCSEQSTR |
HDFGRTR_EL2_TRCSSCSRn | HDFGRTR_EL2_TRCSTATR |
HDFGRTR_EL2_TRCVICTLR);
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
res0 |= (HDFGRTR_EL2_TRBBASER_EL1 | HDFGRTR_EL2_TRBIDR_EL1 |
HDFGRTR_EL2_TRBLIMITR_EL1 | HDFGRTR_EL2_TRBMAR_EL1 |
HDFGRTR_EL2_TRBPTR_EL1 | HDFGRTR_EL2_TRBSR_EL1 |
HDFGRTR_EL2_TRBTRG_EL1);
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
res0 |= (HDFGRTR_EL2_nBRBIDR | HDFGRTR_EL2_nBRBCTL |
HDFGRTR_EL2_nBRBDATA);
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2))
res0 |= HDFGRTR_EL2_nPMSNEVFR_EL1;
set_sysreg_masks(kvm, HDFGRTR_EL2, res0 | HDFGRTR_EL2_RES0, res1);
/* Reuse the bits from the read-side and add the write-specific stuff */
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
res0 |= (HDFGWTR_EL2_PMCR_EL0 | HDFGWTR_EL2_PMSWINC_EL0);
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
res0 |= HDFGWTR_EL2_TRCOSLAR;
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
res0 |= HDFGWTR_EL2_TRFCR_EL1;
set_sysreg_masks(kvm, HFGWTR_EL2, res0 | HDFGWTR_EL2_RES0, res1);
/* HFGITR_EL2 */
res0 = HFGITR_EL2_RES0;
res1 = HFGITR_EL2_RES1;
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, DPB, DPB2))
res0 |= HFGITR_EL2_DCCVADP;
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2))
res0 |= (HFGITR_EL2_ATS1E1RP | HFGITR_EL2_ATS1E1WP);
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
res0 |= (HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS |
HFGITR_EL2_TLBIVAALE1OS | HFGITR_EL2_TLBIVALE1OS |
HFGITR_EL2_TLBIVAAE1OS | HFGITR_EL2_TLBIASIDE1OS |
HFGITR_EL2_TLBIVAE1OS | HFGITR_EL2_TLBIVMALLE1OS);
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
res0 |= (HFGITR_EL2_TLBIRVAALE1 | HFGITR_EL2_TLBIRVALE1 |
HFGITR_EL2_TLBIRVAAE1 | HFGITR_EL2_TLBIRVAE1 |
HFGITR_EL2_TLBIRVAALE1IS | HFGITR_EL2_TLBIRVALE1IS |
HFGITR_EL2_TLBIRVAAE1IS | HFGITR_EL2_TLBIRVAE1IS |
HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS);
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, IMP))
res0 |= (HFGITR_EL2_CFPRCTX | HFGITR_EL2_DVPRCTX |
HFGITR_EL2_CPPRCTX);
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
res0 |= (HFGITR_EL2_nBRBINJ | HFGITR_EL2_nBRBIALL);
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
res0 |= (HFGITR_EL2_nGCSPUSHM_EL1 | HFGITR_EL2_nGCSSTR_EL1 |
HFGITR_EL2_nGCSEPP);
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX))
res0 |= HFGITR_EL2_COSPRCTX;
if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP))
res0 |= HFGITR_EL2_ATS1E1A;
set_sysreg_masks(kvm, HFGITR_EL2, res0, res1);
/* HAFGRTR_EL2 - not a lot to see here */
res0 = HAFGRTR_EL2_RES0;
res1 = HAFGRTR_EL2_RES1;
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
res0 |= ~(res0 | res1);
set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
out:
mutex_unlock(&kvm->arch.config_lock); mutex_unlock(&kvm->arch.config_lock);
return 0; return ret;
} }
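The per-register sanitisation that kvm_init_nv_sysregs() sets up above reduces to two mask operations at read time. Here is a standalone sketch of what kvm_vcpu_sanitise_vncr_reg() computes, with made-up mask and register values.

/* Illustration only: applying a VM's RES0/RES1 masks to a raw
 * VNCR-backed register value. */
#include <stdint.h>
#include <stdio.h>

static uint64_t sanitise(uint64_t v, uint64_t res0, uint64_t res1)
{
	v &= ~res0;	/* bits that are RES0 for this VM read as zero */
	v |= res1;	/* bits that are RES1 for this VM read as one */
	return v;
}

int main(void)
{
	uint64_t raw  = 0x00000000ffff0003ULL;	/* hypothetical stored value */
	uint64_t res0 = 0xffffffff00000000ULL;	/* hypothetical RES0 mask */
	uint64_t res1 = 0x0000000000000001ULL;	/* hypothetical RES1 mask */

	printf("sanitised: %#018llx\n",
	       (unsigned long long)sanitise(raw, res0, res1));
	return 0;
}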
...@@ -64,12 +64,11 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm) ...@@ -64,12 +64,11 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{ {
u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 | u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
kvm_pmu_event_mask(kvm); kvm_pmu_event_mask(kvm);
u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0)) if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
mask |= ARMV8_PMU_INCLUDE_EL2; mask |= ARMV8_PMU_INCLUDE_EL2;
if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0)) if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
mask |= ARMV8_PMU_EXCLUDE_NS_EL0 | mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
ARMV8_PMU_EXCLUDE_NS_EL1 | ARMV8_PMU_EXCLUDE_NS_EL1 |
ARMV8_PMU_EXCLUDE_EL3; ARMV8_PMU_EXCLUDE_EL3;
...@@ -83,8 +82,10 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm) ...@@ -83,8 +82,10 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
*/ */
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc) static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{ {
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
return (pmc->idx == ARMV8_PMU_CYCLE_IDX || return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc))); kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
} }
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc) static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
...@@ -556,7 +557,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) ...@@ -556,7 +557,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
return; return;
/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */ /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
if (!kvm_pmu_is_3p5(vcpu)) if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
val &= ~ARMV8_PMU_PMCR_LP; val &= ~ARMV8_PMU_PMCR_LP;
/* The reset bits don't indicate any state, and shouldn't be saved. */ /* The reset bits don't indicate any state, and shouldn't be saved. */
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <linux/bitfield.h> #include <linux/bitfield.h>
#include <linux/bsearch.h> #include <linux/bsearch.h>
#include <linux/cacheinfo.h> #include <linux/cacheinfo.h>
#include <linux/debugfs.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/printk.h> #include <linux/printk.h>
...@@ -31,6 +32,7 @@ ...@@ -31,6 +32,7 @@
#include <trace/events/kvm.h> #include <trace/events/kvm.h>
#include "check-res-bits.h"
#include "sys_regs.h" #include "sys_regs.h"
#include "trace.h" #include "trace.h"
...@@ -505,10 +507,9 @@ static bool trap_loregion(struct kvm_vcpu *vcpu, ...@@ -505,10 +507,9 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
struct sys_reg_params *p, struct sys_reg_params *p,
const struct sys_reg_desc *r) const struct sys_reg_desc *r)
{ {
u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1);
u32 sr = reg_to_encoding(r); u32 sr = reg_to_encoding(r);
if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) { if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
kvm_inject_undefined(vcpu); kvm_inject_undefined(vcpu);
return false; return false;
} }
@@ -2197,16 +2198,6 @@ static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
  * guest...
  */
 static const struct sys_reg_desc sys_reg_descs[] = {
-	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
-	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
-	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
-	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
-	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
-	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
-	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
-	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
-	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
 	DBG_BCR_BVR_WCR_WVR_EL1(0),
 	DBG_BCR_BVR_WCR_WVR_EL1(1),
 	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
@@ -2738,6 +2729,18 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
 };
+static struct sys_reg_desc sys_insn_descs[] = {
+	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
+	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
+	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
+	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
+};
 static const struct sys_reg_desc *first_idreg;
 static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
@@ -2748,8 +2751,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
 		return ignore_write(vcpu, p);
 	} else {
 		u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
-		u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1);
-		u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr);
+		u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
 		p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
 			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
@@ -3395,12 +3397,6 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
 	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
 }
-static bool is_imp_def_sys_reg(struct sys_reg_params *params)
-{
-	// See ARM DDI 0487E.a, section D12.3.2
-	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
-}
 /**
  * emulate_sys_reg - Emulate a guest access to an AArch64 system register
  * @vcpu: The VCPU pointer
@@ -3414,23 +3410,103 @@ static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
 	const struct sys_reg_desc *r;
 	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 	if (likely(r)) {
 		perform_access(vcpu, params, r);
 		return true;
 	}
-	if (is_imp_def_sys_reg(params)) {
-		kvm_inject_undefined(vcpu);
-	} else {
-		print_sys_reg_msg(params,
-				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
-				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
-		kvm_inject_undefined(vcpu);
-	}
+	print_sys_reg_msg(params,
+			  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
+			  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
+	kvm_inject_undefined(vcpu);
 	return false;
 }
+static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
+{
+	struct kvm *kvm = s->private;
+	u8 *iter;
+	mutex_lock(&kvm->arch.config_lock);
+	iter = &kvm->arch.idreg_debugfs_iter;
+	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
+	    *iter == (u8)~0) {
+		*iter = *pos;
+		if (*iter >= KVM_ARM_ID_REG_NUM)
+			iter = NULL;
+	} else {
+		iter = ERR_PTR(-EBUSY);
+	}
+	mutex_unlock(&kvm->arch.config_lock);
+	return iter;
+}
+static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct kvm *kvm = s->private;
+	(*pos)++;
+	if ((kvm->arch.idreg_debugfs_iter + 1) < KVM_ARM_ID_REG_NUM) {
+		kvm->arch.idreg_debugfs_iter++;
+		return &kvm->arch.idreg_debugfs_iter;
+	}
+	return NULL;
+}
+static void idregs_debug_stop(struct seq_file *s, void *v)
+{
+	struct kvm *kvm = s->private;
+	if (IS_ERR(v))
+		return;
+	mutex_lock(&kvm->arch.config_lock);
+	kvm->arch.idreg_debugfs_iter = ~0;
+	mutex_unlock(&kvm->arch.config_lock);
+}
+static int idregs_debug_show(struct seq_file *s, void *v)
+{
+	struct kvm *kvm = s->private;
+	const struct sys_reg_desc *desc;
+	desc = first_idreg + kvm->arch.idreg_debugfs_iter;
+	if (!desc->name)
+		return 0;
+	seq_printf(s, "%20s:\t%016llx\n",
+		   desc->name, IDREG(kvm, IDX_IDREG(kvm->arch.idreg_debugfs_iter)));
+	return 0;
+}
+static const struct seq_operations idregs_debug_sops = {
+	.start = idregs_debug_start,
+	.next = idregs_debug_next,
+	.stop = idregs_debug_stop,
+	.show = idregs_debug_show,
+};
+DEFINE_SEQ_ATTRIBUTE(idregs_debug);
+void kvm_sys_regs_create_debugfs(struct kvm *kvm)
+{
+	kvm->arch.idreg_debugfs_iter = ~0;
+	debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
+			    &idregs_debug_fops);
+}
 static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
 {
 	const struct sys_reg_desc *idreg = first_idreg;
@@ -3478,28 +3554,39 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 }
 /**
- * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
+ * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
+ * trap on a guest execution
  * @vcpu: The VCPU pointer
  */
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
 {
+	const struct sys_reg_desc *desc = NULL;
 	struct sys_reg_params params;
 	unsigned long esr = kvm_vcpu_get_esr(vcpu);
 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
+	int sr_idx;
 	trace_kvm_handle_sys_reg(esr);
-	if (__check_nv_sr_forward(vcpu))
+	if (triage_sysreg_trap(vcpu, &sr_idx))
 		return 1;
 	params = esr_sys64_to_params(esr);
 	params.regval = vcpu_get_reg(vcpu, Rt);
-	if (!emulate_sys_reg(vcpu, &params))
-		return 1;
-	if (!params.is_write)
+	/* System registers have Op0=={2,3}, as per DDI487 J.a C5.1.2 */
+	if (params.Op0 == 2 || params.Op0 == 3)
+		desc = &sys_reg_descs[sr_idx];
+	else
+		desc = &sys_insn_descs[sr_idx];
+	perform_access(vcpu, &params, desc);
+	/* Read from system register? */
+	if (!params.is_write &&
+	    (params.Op0 == 2 || params.Op0 == 3))
 		vcpu_set_reg(vcpu, Rt, params.regval);
 	return 1;
 }
@@ -3941,11 +4028,86 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *
 	return 0;
 }
+void kvm_init_sysreg(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	mutex_lock(&kvm->arch.config_lock);
+	/*
+	 * In the absence of FGT, we cannot independently trap TLBI
+	 * Range instructions. This isn't great, but trapping all
+	 * TLBIs would be far worse. Live with it...
+	 */
+	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+		vcpu->arch.hcr_el2 |= HCR_TTLBOS;
+	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
+		vcpu->arch.hcrx_el2 = HCRX_GUEST_FLAGS;
+		if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+			vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+	}
+	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
+		goto out;
+	kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 |
+				       HFGxTR_EL2_nMAIR2_EL1 |
+				       HFGxTR_EL2_nS2POR_EL1 |
+				       HFGxTR_EL2_nPOR_EL1 |
+				       HFGxTR_EL2_nPOR_EL0 |
+				       HFGxTR_EL2_nACCDATA_EL1 |
+				       HFGxTR_EL2_nSMPRI_EL1_MASK |
+				       HFGxTR_EL2_nTPIDR2_EL0_MASK);
+	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
+		kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS|
+						HFGITR_EL2_TLBIRVALE1OS |
+						HFGITR_EL2_TLBIRVAAE1OS |
+						HFGITR_EL2_TLBIRVAE1OS |
+						HFGITR_EL2_TLBIVAALE1OS |
+						HFGITR_EL2_TLBIVALE1OS |
+						HFGITR_EL2_TLBIVAAE1OS |
+						HFGITR_EL2_TLBIASIDE1OS |
+						HFGITR_EL2_TLBIVAE1OS |
+						HFGITR_EL2_TLBIVMALLE1OS);
+	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
+		kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 |
+						HFGITR_EL2_TLBIRVALE1 |
+						HFGITR_EL2_TLBIRVAAE1 |
+						HFGITR_EL2_TLBIRVAE1 |
+						HFGITR_EL2_TLBIRVAALE1IS|
+						HFGITR_EL2_TLBIRVALE1IS |
+						HFGITR_EL2_TLBIRVAAE1IS |
+						HFGITR_EL2_TLBIRVAE1IS |
+						HFGITR_EL2_TLBIRVAALE1OS|
+						HFGITR_EL2_TLBIRVALE1OS |
+						HFGITR_EL2_TLBIRVAAE1OS |
+						HFGITR_EL2_TLBIRVAE1OS);
+	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
+		kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
+						HFGxTR_EL2_nPIR_EL1);
+	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
+		kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
+						  HAFGRTR_EL2_RES1);
+	set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
+out:
+	mutex_unlock(&kvm->arch.config_lock);
+}
 int __init kvm_sys_reg_table_init(void)
 {
 	struct sys_reg_params params;
 	bool valid = true;
 	unsigned int i;
+	int ret = 0;
+	check_res_bits();
 	/* Make sure tables are unique and in order. */
 	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
@@ -3954,6 +4116,7 @@ int __init kvm_sys_reg_table_init(void)
 	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
 	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
 	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
+	valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
 	if (!valid)
 		return -EINVAL;
@@ -3968,8 +4131,13 @@ int __init kvm_sys_reg_table_init(void)
 	if (!first_idreg)
 		return -EINVAL;
-	if (kvm_get_mode() == KVM_MODE_NV)
-		return populate_nv_trap_config();
-	return 0;
+	ret = populate_nv_trap_config();
+	for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
+		ret = populate_sysreg_config(sys_reg_descs + i, i);
+	for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
+		ret = populate_sysreg_config(sys_insn_descs + i, i);
+	return ret;
 }
@@ -233,6 +233,8 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
 			 const struct sys_reg_desc table[], unsigned int num);
+bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index);
 #define AA32(_x) .aarch32_map = AA32_##_x
 #define Op0(_x) .Op0 = _x
 #define Op1(_x) .Op1 = _x
......
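kvm_init_sysreg() above records, once per VM, the fine-grained trap bits for features hidden from the guest's ID registers; a trap that matches one of those bits is then handled as UNDEF instead of being emulated. A standalone model of that bookkeeping (plain C with invented types and bit assignments, not the kernel's structures or code paths):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented, simplified model of the fine-grained UNDEF idea: one mask of
 * "UNDEF these" bits per trap-register group, filled in once per VM. */
enum example_fgt_group { EX_HFGITR, EX_NR_GROUPS };

struct example_vm {
	uint64_t fgu[EX_NR_GROUPS];
	bool has_tlbi_range;	/* would come from the guest's ID registers */
};

static void example_init_fgu(struct example_vm *vm)
{
	if (!vm->has_tlbi_range)
		vm->fgu[EX_HFGITR] |= 1ULL << 0;	/* pretend bit 0 covers TLBI range ops */
}

/* On a trap, a set FGU bit means "inject UNDEF" rather than emulate. */
static bool example_undef_on_trap(const struct example_vm *vm,
				  enum example_fgt_group grp, uint64_t bit)
{
	return vm->fgu[grp] & bit;
}

int main(void)
{
	struct example_vm vm = { .has_tlbi_range = false };

	example_init_fgu(&vm);
	printf("TLBI range trap -> UNDEF: %s\n",
	       example_undef_on_trap(&vm, EX_HFGITR, 1ULL << 0) ? "yes" : "no");
	return 0;
}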
@@ -1366,6 +1366,7 @@ EndEnum
 UnsignedEnum 43:40 SPECRES
 	0b0000 NI
 	0b0001 IMP
+	0b0010 COSP_RCTX
 EndEnum
 UnsignedEnum 39:36 SB
 	0b0000 NI
@@ -1492,7 +1493,12 @@ EndEnum
 EndSysreg
 Sysreg ID_AA64ISAR3_EL1 3 0 0 6 3
-Res0 63:12
+Res0 63:16
+UnsignedEnum 15:12 PACM
+	0b0000 NI
+	0b0001 TRIVIAL_IMP
+	0b0010 FULL_IMP
+EndEnum
 UnsignedEnum 11:8 TLBIW
 	0b0000 NI
 	0b0001 IMP
......
@@ -90,16 +90,6 @@ void kvm_vcpu_pmu_resync_el0(void);
 	vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
 } while (0)
-/*
- * Evaluates as true when emulating PMUv3p5, and false otherwise.
- */
-#define kvm_pmu_is_3p5(vcpu) ({ \
-	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); \
-	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); \
-	\
-	pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5; \
-})
 u8 kvm_arm_pmu_get_pmuver_limit(void);
 u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
 int kvm_arm_set_default_pmu(struct kvm *kvm);
@@ -168,7 +158,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 }
 #define kvm_vcpu_has_pmu(vcpu) ({ false; })
-#define kvm_pmu_is_3p5(vcpu) ({ false; })
 static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
......