Commit b899c132 authored by Krish Sadhukhan, committed by Paolo Bonzini

KVM: x86: Create mask for guest CR4 reserved bits in kvm_update_cpuid()

Instead of creating the mask for guest CR4 reserved bits in kvm_valid_cr4(),
create it in kvm_update_cpuid(): the mask only changes when guest CPUID
changes, so it can be computed once there and reused on every call to
kvm_valid_cr4() rather than rebuilt each time.
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Message-Id: <1594168797-29444-2-git-send-email-krish.sadhukhan@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d42e3fae
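
In outline, the change caches a CPUID-derived mask. Below is a minimal
standalone C sketch of the same pattern (names like update_cpuid() and
valid_cr4() are hypothetical stand-ins, not KVM's API; only one feature
bit is modeled):

#include <errno.h>
#include <stdio.h>

#define X86_CR4_OSXSAVE (1UL << 18)	/* architectural CR4.OSXSAVE bit */

struct vcpu {
	int guest_has_xsave;			/* stand-in for guest CPUID state */
	unsigned long cr4_guest_rsvd_bits;	/* cached reserved-bit mask */
};

/* Cold path: recompute the mask only when guest CPUID is (re)configured. */
static void update_cpuid(struct vcpu *v)
{
	unsigned long rsvd = 0;

	if (!v->guest_has_xsave)
		rsvd |= X86_CR4_OSXSAVE;
	v->cr4_guest_rsvd_bits = rsvd;
}

/* Hot path: every CR4 write only tests the cached mask. */
static int valid_cr4(const struct vcpu *v, unsigned long cr4)
{
	return (cr4 & v->cr4_guest_rsvd_bits) ? -EINVAL : 0;
}

int main(void)
{
	struct vcpu v = { .guest_has_xsave = 0 };

	update_cpuid(&v);
	/* setting a reserved bit is rejected: prints -22 (-EINVAL) */
	printf("%d\n", valid_cr4(&v, X86_CR4_OSXSAVE));
	return 0;
}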
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -545,6 +545,7 @@ struct kvm_vcpu_arch {
 	unsigned long cr3;
 	unsigned long cr4;
 	unsigned long cr4_guest_owned_bits;
+	unsigned long cr4_guest_rsvd_bits;
 	unsigned long cr8;
 	u32 host_pkru;
 	u32 pkru;
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -128,6 +128,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 	kvm_pmu_refresh(vcpu);
 
+	vcpu->arch.cr4_guest_rsvd_bits =
+	    __cr4_reserved_bits(guest_cpuid_has, vcpu);
 
 	return 0;
 }
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -955,33 +955,12 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-#define __cr4_reserved_bits(__cpu_has, __c)		\
-({							\
-	u64 __reserved_bits = CR4_RESERVED_BITS;	\
-							\
-	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
-		__reserved_bits |= X86_CR4_OSXSAVE;	\
-	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
-		__reserved_bits |= X86_CR4_SMEP;	\
-	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
-		__reserved_bits |= X86_CR4_SMAP;	\
-	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
-		__reserved_bits |= X86_CR4_FSGSBASE;	\
-	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
-		__reserved_bits |= X86_CR4_PKE;		\
-	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
-		__reserved_bits |= X86_CR4_LA57;	\
-	if (!__cpu_has(__c, X86_FEATURE_UMIP))		\
-		__reserved_bits |= X86_CR4_UMIP;	\
-	__reserved_bits;				\
-})
-
 static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & cr4_reserved_bits)
 		return -EINVAL;
 
-	if (cr4 & __cr4_reserved_bits(guest_cpuid_has, vcpu))
+	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
 		return -EINVAL;
 
 	return 0;
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -373,4 +373,25 @@ bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 
 #define KVM_MSR_RET_INVALID	2
 
+#define __cr4_reserved_bits(__cpu_has, __c)		\
+({							\
+	u64 __reserved_bits = CR4_RESERVED_BITS;	\
+							\
+	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
+		__reserved_bits |= X86_CR4_OSXSAVE;	\
+	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
+		__reserved_bits |= X86_CR4_SMEP;	\
+	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
+		__reserved_bits |= X86_CR4_SMAP;	\
+	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
+		__reserved_bits |= X86_CR4_FSGSBASE;	\
+	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
+		__reserved_bits |= X86_CR4_PKE;		\
+	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
+		__reserved_bits |= X86_CR4_LA57;	\
+	if (!__cpu_has(__c, X86_FEATURE_UMIP))		\
+		__reserved_bits |= X86_CR4_UMIP;	\
+	__reserved_bits;				\
+})
+
 #endif
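
The last hunk moves __cr4_reserved_bits() into x86.h, where both x86.c and
cpuid.c can use it. The macro is a GNU C statement expression parameterized
by a feature predicate, so one piece of bit logic can be evaluated against
different feature sources. A standalone sketch of that technique (all names
here are hypothetical, not the kernel's; compile with GCC or Clang, since
statement expressions are a GNU extension):

#include <stdio.h>

enum { FEAT_XSAVE, FEAT_SMEP, NR_FEATS };

#define CR4_OSXSAVE (1UL << 18)
#define CR4_SMEP    (1UL << 20)

/*
 * GNU C statement expression that evaluates to __rsvd.  The feature
 * predicate __has and its context __c come from the caller, so the
 * same bit logic can be applied to different feature sources.
 */
#define reserved_bits(__has, __c)		\
({						\
	unsigned long __rsvd = 0;		\
	if (!__has(__c, FEAT_XSAVE))		\
		__rsvd |= CR4_OSXSAVE;		\
	if (!__has(__c, FEAT_SMEP))		\
		__rsvd |= CR4_SMEP;		\
	__rsvd;					\
})

static int has_feat(const int *feats, int f)
{
	return feats[f];
}

int main(void)
{
	int guest[NR_FEATS] = { 1, 0 };	/* XSAVE yes, SMEP no */
	int host[NR_FEATS]  = { 1, 1 };	/* both present */

	printf("guest rsvd %#lx\n", reserved_bits(has_feat, guest));
	printf("host  rsvd %#lx\n", reserved_bits(has_feat, host));
	return 0;
}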