Commit 967bc679 authored by Paolo Bonzini, committed by Greg Kroah-Hartman

KVM: nVMX: do not use dangling shadow VMCS after guest reset

commit 88dddc11 upstream.

If a KVM guest is reset while running a nested guest, free_nested will
disable the shadow VMCS execution control in the vmcs01.  However,
on the next KVM_RUN vmx_vcpu_run would nevertheless try to sync
the VMCS12 to the shadow VMCS which has since been freed.

This causes a vmptrld of a NULL pointer on my machine, but Jan reports
the host to hang altogether.  Let's see how much this trivial patch fixes.
Reported-by: Jan Kiszka <jan.kiszka@siemens.com>
Cc: Liran Alon <liran.alon@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3a17ca86
@@ -8457,6 +8457,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
 {
 	vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
 	vmcs_write64(VMCS_LINK_POINTER, -1ull);
+	vmx->nested.sync_shadow_vmcs = false;
 }

 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
@@ -8468,7 +8469,6 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
 		/* copy to memory all shadowed fields in case
 		   they were modified */
 		copy_shadow_to_vmcs12(vmx);
-		vmx->nested.sync_shadow_vmcs = false;
 		vmx_disable_shadow_vmcs(vmx);
 	}
 	vmx->nested.posted_intr_nv = -1;
@@ -8668,6 +8668,9 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	u64 field_value;
 	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

+	if (WARN_ON(!shadow_vmcs))
+		return;
+
 	preempt_disable();

 	vmcs_load(shadow_vmcs);
@@ -8706,6 +8709,9 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 	u64 field_value = 0;
 	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

+	if (WARN_ON(!shadow_vmcs))
+		return;
+
 	vmcs_load(shadow_vmcs);

 	for (q = 0; q < ARRAY_SIZE(fields); q++) {
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment