Commit db77548b authored by Paolo Bonzini, committed by Greg Kroah-Hartman

KVM: vmx, svm: always run with EFER.NXE=1 when shadow paging is active

commit 9167ab79 upstream.

VMX already does so if the host has SMEP, in order to support the combination of
CR0.WP=1 and CR4.SMEP=1.  However, it is perfectly safe to always do so, and in
fact VMX also ends up running with EFER.NXE=1 on old processors that lack the
"load EFER" controls, because it may help avoiding a slow MSR write.

SVM does not have similar code, but it should since recent AMD processors do
support SMEP.  So this patch makes the code for the two vendors simpler and
more similar, while fixing an issue with CR0.WP=1 and CR4.SMEP=1 on AMD.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: stable@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 37dfbc8b
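
For context, the rule the patch establishes for both vendors can be summarized by the following stand-alone sketch. This is not kernel code: hw_efer_for_guest and tdp_enabled are made-up names for illustration, and only the idea (force NXE into the EFER value loaded into hardware whenever shadow paging is used, without touching the guest-visible EFER) is taken from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* EFER bit definitions, same architectural values the kernel uses. */
#define EFER_SCE  (1ULL << 0)   /* syscall enable */
#define EFER_LME  (1ULL << 8)   /* long mode enable */
#define EFER_LMA  (1ULL << 10)  /* long mode active */
#define EFER_NX   (1ULL << 11)  /* no-execute enable (NXE) */

/*
 * Hypothetical helper: compute the EFER value to load into hardware
 * while the guest runs.  With shadow paging (EPT/NPT disabled), NXE is
 * always set, because the shadow MMU relies on the NX bit in its page
 * tables, e.g. to handle CR0.WP=1 together with CR4.SMEP=1.  The
 * guest-visible EFER is left unchanged.
 */
static uint64_t hw_efer_for_guest(uint64_t guest_efer, bool tdp_enabled)
{
	uint64_t efer = guest_efer;

	if (!tdp_enabled) {
		/* Shadow paging assumes NX to be available. */
		efer |= EFER_NX;

		/* Mirrors the SVM hunk below: drop LME if long mode is not active. */
		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}
	return efer;
}

int main(void)
{
	/* Guest never enabled NXE; with shadow paging the hardware value still has it. */
	printf("shadow paging: %#llx\n",
	       (unsigned long long)hw_efer_for_guest(EFER_SCE, false));
	/* With two-dimensional paging the guest value passes through unchanged. */
	printf("tdp:           %#llx\n",
	       (unsigned long long)hw_efer_for_guest(EFER_SCE, true));
	return 0;
}
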
@@ -736,8 +736,14 @@ static int get_npt_level(struct kvm_vcpu *vcpu)
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	vcpu->arch.efer = efer;
-	if (!npt_enabled && !(efer & EFER_LMA))
-		efer &= ~EFER_LME;
+
+	if (!npt_enabled) {
+		/* Shadow paging assumes NX to be available.  */
+		efer |= EFER_NX;
+
+		if (!(efer & EFER_LMA))
+			efer &= ~EFER_LME;
+	}
 
 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
 	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
@@ -2785,17 +2785,9 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 	u64 guest_efer = vmx->vcpu.arch.efer;
 	u64 ignore_bits = 0;
 
-	if (!enable_ept) {
-		/*
-		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
-		 * host CPUID is more efficient than testing guest CPUID
-		 * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
-		 */
-		if (boot_cpu_has(X86_FEATURE_SMEP))
-			guest_efer |= EFER_NX;
-		else if (!(guest_efer & EFER_NX))
-			ignore_bits |= EFER_NX;
-	}
+	/* Shadow paging assumes NX to be available.  */
+	if (!enable_ept)
+		guest_efer |= EFER_NX;
 
 	/*
 	 * LMA and LME handled by hardware; SCE meaningless outside long mode.