Commit 4c0680d3 authored by Marc Zyngier

KVM: arm64: Move vcpu configuration flags into their own set

The KVM_ARM64_{GUEST_HAS_SVE,VCPU_SVE_FINALIZED,GUEST_HAS_PTRAUTH}
flags are purely configuration flags. Once set, they are never cleared,
but evaluated all over the code base.

Move these three flags into the configuration set in one go, using
the new accessors, and take this opportunity to drop the KVM_ARM64_
prefix which doesn't provide any help.

Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 690bacb8
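
The machinery behind the new accessors is worth spelling out before reading the diff. A flag name such as GUEST_HAS_SVE expands, via __vcpu_single_flag(), into a pair of arguments: the flag set it lives in (cflags here) and its bit mask. The variadic vcpu_get/set/clear_flag() wrappers then forward that pair to the inner helpers through __VA_ARGS__. Below is a minimal, self-contained C sketch of that pattern; the struct layout and the macro bodies are simplified illustrations, not the kernel's exact definitions.

/*
 * Standalone sketch of the split flag-set pattern; simplified, not
 * the kernel's exact implementation.
 */
#include <stdio.h>

#define BIT(n)		(1UL << (n))

struct vcpu_arch {
	unsigned long cflags;	/* configuration flags: set once, never cleared */
};

struct kvm_vcpu {
	struct vcpu_arch arch;
};

/* A flag name expands to "<set>, <mask>": which set it lives in, plus its bit */
#define __vcpu_single_flag(_set, _f)	_set, (_f)

/* The inner helpers receive the set name and the mask as separate arguments */
#define __vcpu_get_flag(v, flagset, m)		((v)->arch.flagset & (m))
#define __vcpu_set_flag(v, flagset, m)		((v)->arch.flagset |= (m))
#define __vcpu_clear_flag(v, flagset, m)	((v)->arch.flagset &= ~(m))

/* The public accessors forward the expanded flag via __VA_ARGS__ */
#define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)

/* The three configuration flags this commit moves */
#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))

int main(void)
{
	struct kvm_vcpu vcpu = { .arch = { 0 } };

	/* GUEST_HAS_SVE expands to "cflags, (BIT(0))" inside the wrapper */
	vcpu_set_flag(&vcpu, GUEST_HAS_SVE);

	printf("sve=%d finalized=%d\n",
	       !!vcpu_get_flag(&vcpu, GUEST_HAS_SVE),
	       !!vcpu_get_flag(&vcpu, VCPU_SVE_FINALIZED));
	return 0;
}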
arch/arm64/include/asm/kvm_host.h

@@ -467,6 +467,13 @@ struct kvm_vcpu_arch {
 #define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
 #define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
 
+/* SVE exposed to guest */
+#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
+/* SVE config completed */
+#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
+/* PTRAUTH exposed to guest */
+#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
+
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
@@ -491,9 +498,6 @@ struct kvm_vcpu_arch {
 /* vcpu_arch flags field values: */
 #define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
 #define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
-#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
-#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
-#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
 #define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
 /*
  * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
@@ -530,13 +534,13 @@ struct kvm_vcpu_arch {
				 KVM_GUESTDBG_SINGLESTEP)
 
 #define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
-			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
+			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 #define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
-	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+	 vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
 #else
 #define vcpu_has_ptrauth(vcpu)		false
 #endif
@@ -893,8 +897,7 @@ void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 
-#define kvm_arm_vcpu_sve_finalized(vcpu) \
-	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+#define kvm_arm_vcpu_sve_finalized(vcpu)	vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
 
 #define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
arch/arm64/kvm/reset.c

@@ -81,7 +81,7 @@ static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
-	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
+	vcpu_set_flag(vcpu, GUEST_HAS_SVE);
 
	return 0;
 }
@@ -120,7 +120,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
	}
 
	vcpu->arch.sve_state = buf;
-	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
+	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
	return 0;
 }
@@ -177,7 +177,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
	    !system_has_full_ptr_auth())
		return -EINVAL;
 
-	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
+	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
 
	return 0;
 }
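
The conversion at every call site is mechanical: set-once updates and read-side tests map one-to-one onto the new accessors, as this before/after pair (taken from the hunks above) summarizes.

/* setting a configuration flag: before and after */
vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
vcpu_set_flag(vcpu, GUEST_HAS_SVE);

/* testing one: before and after */
(vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED
vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)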