Commit 761e4169 authored by Krish Sadhukhan, committed by Paolo Bonzini

KVM: nSVM: Check that MBZ bits in CR3 and CR4 are not set on vmrun of nested guests

According to the section "Canonicalization and Consistency Checks" in APM
vol. 2, the following guest state is illegal:

    "Any MBZ bit of CR3 is set."
    "Any MBZ bit of CR4 is set."

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Message-Id: <1594168797-29444-3-git-send-email-krish.sadhukhan@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 53efe527
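Which CR3 bits are MBZ depends on the guest's paging mode, which is what the
mask selection added in the diff below implements. As a standalone sketch of
that selection (illustrative only: guest_cr3_mbz_mask() is a hypothetical
helper written for this example, not part of the patch; the masks mirror the
constants the patch adds to svm.h):

/* Illustrative sketch, not kernel code: pick the CR3 MBZ mask for the
 * guest's paging mode, mirroring the constants this patch adds to svm.h. */
#include <stdbool.h>
#include <stdint.h>

#define EFER_LME    (1ULL << 8)   /* long mode enable */
#define X86_CR0_PG  (1ULL << 31)  /* paging */
#define X86_CR4_PAE (1ULL << 5)   /* physical address extension */

static uint64_t guest_cr3_mbz_mask(uint64_t efer, uint64_t cr0, uint64_t cr4)
{
	/* Long mode is active only when EFER.LME and CR0.PG are both set. */
	bool long_mode = (efer & EFER_LME) && (cr0 & X86_CR0_PG);

	if (long_mode)
		return 0xfff0000000000fe7ULL;	/* bits 63:52, 11:5 and 2:0 */
	if (cr4 & X86_CR4_PAE)
		return 0x7ULL;			/* legacy PAE: bits 2:0 */
	return 0xfe7ULL;			/* legacy 32-bit: bits 11:5 and 2:0 */
}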
arch/x86/kvm/svm/nested.c
@@ -222,8 +222,9 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 	return true;
 }
 
-static bool nested_vmcb_checks(struct vmcb *vmcb)
+static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb)
 {
+	bool nested_vmcb_lma;
 	if ((vmcb->save.efer & EFER_SVME) == 0)
 		return false;
 
@@ -234,6 +235,27 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
 	if (!kvm_dr6_valid(vmcb->save.dr6) || !kvm_dr7_valid(vmcb->save.dr7))
 		return false;
 
+	nested_vmcb_lma =
+		(vmcb->save.efer & EFER_LME) &&
+		(vmcb->save.cr0 & X86_CR0_PG);
+
+	if (!nested_vmcb_lma) {
+		if (vmcb->save.cr4 & X86_CR4_PAE) {
+			if (vmcb->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
+				return false;
+		} else {
+			if (vmcb->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
+				return false;
+		}
+	} else {
+		if (!(vmcb->save.cr4 & X86_CR4_PAE) ||
+		    !(vmcb->save.cr0 & X86_CR0_PE) ||
+		    (vmcb->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
+			return false;
+	}
+	if (kvm_valid_cr4(&svm->vcpu, vmcb->save.cr4))
+		return false;
+
 	return nested_vmcb_check_controls(&vmcb->control);
 }
@@ -419,7 +441,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 	nested_vmcb = map.hva;
 
-	if (!nested_vmcb_checks(nested_vmcb)) {
+	if (!nested_vmcb_checks(svm, nested_vmcb)) {
 		nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
 		nested_vmcb->control.exit_code_hi = 0;
 		nested_vmcb->control.exit_info_1  = 0;
 		nested_vmcb->control.exit_info_2  = 0;
arch/x86/kvm/svm/svm.h
@@ -343,7 +343,10 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_INVALID				0xffffffffU
+#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
+#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
+#define MSR_CR3_LONG_RESERVED_MASK		0xfff0000000000fe7U
+#define MSR_INVALID				0xffffffffU
 
 u32 svm_msrpm_offset(u32 msr);
 void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
arch/x86/kvm/x86.c
@@ -955,7 +955,7 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & cr4_reserved_bits)
 		return -EINVAL;
@@ -965,6 +965,7 @@ static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(kvm_valid_cr4);
 
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
arch/x86/kvm/x86.h
@@ -369,6 +369,7 @@ static inline bool kvm_dr6_valid(u64 data)
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
+int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 
 #define KVM_MSR_RET_INVALID	2
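Continuing the sketch above, a quick usage example (hypothetical values, not
from the patch): in long mode any of bits 63:52 rejects the CR3, while legacy
PAE mode only reserves bits 2:0.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Long mode: bit 63 falls inside the MBZ mask, so this CR3 is rejected. */
	assert(0x8000000000001000ULL &
	       guest_cr3_mbz_mask(EFER_LME, X86_CR0_PG, X86_CR4_PAE));

	/* Legacy PAE: a page-aligned PDPT base has no MBZ bit set. */
	assert(!(0x1000ULL & guest_cr3_mbz_mask(0, X86_CR0_PG, X86_CR4_PAE)));

	puts("CR3 MBZ mask checks behave as expected");
	return 0;
}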