Commit c1c35cf7 authored by Paolo Bonzini

KVM: x86: cleanup CR3 reserved bits checks

If not in long mode, the low bits of CR3 are reserved but not enforced to
be zero, so remove those checks.  If in long mode, however, the MBZ bits
extend down to the highest physical address bit of the guest, excluding
the encryption bit.

Make the checks consistent with the above, and match them between
nested_vmcb_checks and KVM_SET_SREGS.

Cc: stable@vger.kernel.org
Fixes: 761e4169 ("KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests")
Fixes: a780a3ea ("KVM: X86: Fix reserved bits check for MOV to CR3")
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ccd85d90
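
For context, a minimal sketch of where the vcpu->arch.cr3_lm_rsvd_bits mask used below could come from: in long mode the reserved bits run from the guest's MAXPHYADDR up to bit 63, with the SEV encryption bit carved out. This is not part of this patch; the function name example_update_cr3_lm_rsvd_bits is hypothetical, and rsvd_bits(), cpuid_maxphyaddr(), kvm_find_cpuid_entry() and sev_guest() are assumed to be the KVM/SVM helpers available at the time.

/*
 * Hypothetical sketch of how the per-vCPU CR3 reserved-bits mask
 * might be derived when guest CPUID is (re)set.
 */
static void example_update_cr3_lm_rsvd_bits(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	/* In long mode, CR3 bits [63:MAXPHYADDR] must be zero... */
	vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);

	/*
	 * ...except the SEV encryption bit (position reported in
	 * CPUID 0x8000001F.EBX[5:0]), which an SEV guest may
	 * legitimately set in CR3, so clear it from the mask.
	 */
	if (sev_guest(vcpu->kvm)) {
		best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
		if (best)
			vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f));
	}
}
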
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -231,6 +231,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 
 static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
+	struct kvm_vcpu *vcpu = &svm->vcpu;
 	bool vmcb12_lma;
 
 	if ((vmcb12->save.efer & EFER_SVME) == 0)
@@ -244,18 +245,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 
 	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
 
-	if (!vmcb12_lma) {
-		if (vmcb12->save.cr4 & X86_CR4_PAE) {
-			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
-				return false;
-		} else {
-			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
-				return false;
-		}
-	} else {
+	if (vmcb12_lma) {
 		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
 		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
-		    (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+		    (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
 			return false;
 	}
 
 	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -403,9 +403,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
-#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
 #define MSR_INVALID				0xffffffffU
 
 extern int sev;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9624,6 +9624,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 		 */
 		if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
 			return false;
+		if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
+			return false;
 	} else {
 		/*
 		 * Not in 64-bit mode: EFER.LMA is clear and the code