Commit 9ec19493 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: clear SMM flags before loading state while leaving SMM

RSM emulation is currently broken on VMX when the interrupted guest has
CR4.VMXE=1.  Stop dancing around the issue of HF_SMM_MASK being set when
loading SMSTATE into architectural state, e.g. by toggling it for
problematic flows, and simply clear HF_SMM_MASK prior to loading
architectural state (from SMRAM save state area).
Reported-by: Jon Doron <arilou@gmail.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Liran Alon <liran.alon@oracle.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Fixes: 5bea5123 ("KVM: VMX: check nested state and CR4.VMXE against SMM")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Tested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c5833c7a
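The ordering matters because the check added by commit 5bea5123 ("KVM: VMX: check nested state and CR4.VMXE against SMM") rejects CR4.VMXE=1 while HF_SMM_MASK is set, so RSM can no longer restore the saved CR4 first and drop the SMM flag afterwards. The standalone C sketch below models that interaction with mock types; mock_vcpu, mock_set_cr4 and rsm_restore are illustrative stand-ins rather than real KVM symbols, and the snippet only demonstrates why clearing the SMM flag before loading the saved state (as the patch below does in em_rsm()) lets RSM succeed.

/*
 * Standalone sketch, not kernel code: a CR4 write that mimics the
 * post-5bea5123 consistency check, and an RSM helper that clears the
 * SMM flag before restoring CR4 from the saved state.
 */
#include <stdbool.h>
#include <stdio.h>

#define HF_SMM_MASK     (1u << 6)       /* mirrors the real hflag bit */
#define X86_CR4_VMXE    (1ul << 13)

struct mock_vcpu {
        unsigned int hflags;
        unsigned long cr4;
};

/* CR4.VMXE may not be set while the vCPU is still in SMM. */
static bool mock_set_cr4(struct mock_vcpu *vcpu, unsigned long cr4)
{
        if ((cr4 & X86_CR4_VMXE) && (vcpu->hflags & HF_SMM_MASK))
                return false;
        vcpu->cr4 = cr4;
        return true;
}

/* Fixed RSM ordering: leave SMM first, then load the saved state. */
static bool rsm_restore(struct mock_vcpu *vcpu, unsigned long saved_cr4)
{
        vcpu->hflags &= ~HF_SMM_MASK;
        return mock_set_cr4(vcpu, saved_cr4);
}

int main(void)
{
        /* The interrupted guest was in VMX operation: CR4.VMXE=1 in SMRAM. */
        struct mock_vcpu vcpu = { .hflags = HF_SMM_MASK, .cr4 = 0 };

        printf("RSM %s\n", rsm_restore(&vcpu, X86_CR4_VMXE) ? "succeeds" : "fails");
        return 0;
}

With the old ordering, restoring CR4 while HF_SMM_MASK was still set would fail the same check, which is the VMX breakage this patch fixes.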
@@ -2571,6 +2571,12 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (ret != X86EMUL_CONTINUE)
 		return X86EMUL_UNHANDLEABLE;
 
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+		ctxt->ops->set_nmi_mask(ctxt, false);
+
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
 	/*
 	 * Get back to real mode, to prepare a safe state in which to load
 	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
@@ -2624,12 +2630,6 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;
 	}
 
-	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-		ctxt->ops->set_nmi_mask(ctxt, false);
-
-	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
-
 	ctxt->ops->post_leave_smm(ctxt);
 
 	return X86EMUL_CONTINUE;
...
@@ -6239,21 +6239,17 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	struct page *page;
 	u64 guest;
 	u64 vmcb;
-	int ret;
 
 	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
 	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
 	if (guest) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		nested_vmcb = nested_svm_map(svm, vmcb, &page);
-		if (nested_vmcb)
-			enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
-		else
-			ret = 1;
-
-		vcpu->arch.hflags |= HF_SMM_MASK;
+		if (!nested_vmcb)
+			return 1;
+		enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
 	}
-	return ret;
+	return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
...
@@ -7409,9 +7409,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	}
 
 	if (vmx->nested.smm.guest_mode) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
-		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;
...