Commit cf8b84f4 authored by Jim Mattson, committed by Paolo Bonzini

kvm: nVMX: Prepare for checkpointing L2 state

Split prepare_vmcs12 into two parts: the part that stores the current L2
guest state and the part that sets up the exit information fields. The
former will be used when checkpointing the vCPU's VMX state.
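
In outline, the split looks like this (a condensed sketch based on the diff below; the body of sync_vmcs12 and the exit-information updates are elided):

/* Condensed sketch of the resulting split; see the diff below for the
 * actual change. */
static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
        /* store the current L2 guest state (CR0/CR4, segments, etc.)
         * from the active vmcs02 into the cached vmcs12 */
}

static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                           u32 exit_reason, u32 exit_intr_info,
                           unsigned long exit_qualification)
{
        /* update guest state fields: */
        sync_vmcs12(vcpu, vmcs12);

        /* update exit information fields: (unchanged by this patch) */
}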

Modify prepare_vmcs02 so that it can construct a vmcs02 midway through
L2 execution, using the checkpointed L2 guest state saved into the
cached vmcs12 above.
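
Roughly, the new from_vmentry flag gates the vmcs12 fields that are only meaningful on an actual VMLAUNCH/VMRESUME from L1. A simplified sketch of the pattern (not the full function; see the diff below):

static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                          bool from_vmentry, unsigned long *entry_failure_code)
{
        /* ... */
        if (from_vmentry) {
                /* real nested VM-entry: inject what L1 asked for */
                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                             vmcs12->vm_entry_intr_info_field);
        } else {
                /* rebuilding vmcs02 mid-execution: nothing to inject */
                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
        }
        /* ... entry-controlled state such as DEBUG_CONTROLS, IA32_PAT,
         * BNDCFGS and IA32_EFER is likewise loaded from vmcs12 only when
         * from_vmentry is true ... */
        return 0;
}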
Signed-off-by: Jim Mattson <jmattson@google.com>
[Rebasing: add from_vmentry argument to prepare_vmcs02 instead of using
 vmx->nested.nested_run_pending, because it is no longer 1 at the
 point prepare_vmcs02 is called. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b95234c8
@@ -10040,7 +10040,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
  * is assigned to entry_failure_code on failure.
  */
 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
-                          unsigned long *entry_failure_code)
+                          bool from_vmentry, unsigned long *entry_failure_code)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         u32 exec_control;
@@ -10083,13 +10083,15 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
         vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
 
-        if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
+        if (from_vmentry &&
+            (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
                 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
         } else {
                 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
         }
+        if (from_vmentry) {
         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                      vmcs12->vm_entry_intr_info_field);
         vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
@@ -10098,6 +10100,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                      vmcs12->vm_entry_instruction_len);
         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
                      vmcs12->guest_interruptibility_info);
+        } else {
+                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
+        }
         vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
         vmx_set_rflags(vcpu, vmcs12->guest_rflags);
         vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
@@ -10290,16 +10295,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                         ~VM_ENTRY_IA32E_MODE) |
                 (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
 
-        if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) {
+        if (from_vmentry &&
+            (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
                 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
                 vcpu->arch.pat = vmcs12->guest_ia32_pat;
-        } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
+        } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
+        }
 
         set_cr4_guest_host_mask(vmx);
 
-        if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
+        if (from_vmentry &&
+            vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
                 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
 
         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
@@ -10351,7 +10358,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         vmx_set_cr4(vcpu, vmcs12->guest_cr4);
         vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
 
-        if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
+        if (from_vmentry &&
+            (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
                 vcpu->arch.efer = vmcs12->guest_ia32_efer;
         else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
                 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
@@ -10561,7 +10569,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
         vmx_segment_cache_clear(vmx);
 
-        if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification)) {
+        if (prepare_vmcs02(vcpu, vmcs12, true, &exit_qualification)) {
                 leave_guest_mode(vcpu);
                 vmx_load_vmcs01(vcpu);
                 nested_vmx_entry_failure(vcpu, vmcs12,
@@ -10733,21 +10741,13 @@ static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
 }
 
 /*
- * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
- * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
- * and this function updates it to reflect the changes to the guest state while
- * L2 was running (and perhaps made some exits which were handled directly by L0
- * without going back to L1), and to reflect the exit reason.
- * Note that we do not have to copy here all VMCS fields, just those that
- * could have changed by the L2 guest or the exit - i.e., the guest-state and
- * exit-information fields only. Other fields are modified by L1 with VMWRITE,
- * which already writes to vmcs12 directly.
+ * Update the guest state fields of vmcs12 to reflect changes that
+ * occurred while L2 was running. (The "IA-32e mode guest" bit of the
+ * VM-entry controls is also updated, since this is really a guest
+ * state bit.)
  */
-static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
-                           u32 exit_reason, u32 exit_intr_info,
-                           unsigned long exit_qualification)
+static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 {
-        /* update guest state fields: */
         vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
         vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
@@ -10853,6 +10853,25 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
         if (nested_cpu_has_xsaves(vmcs12))
                 vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
+}
+
+/*
+ * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
+ * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
+ * and this function updates it to reflect the changes to the guest state while
+ * L2 was running (and perhaps made some exits which were handled directly by L0
+ * without going back to L1), and to reflect the exit reason.
+ * Note that we do not have to copy here all VMCS fields, just those that
+ * could have changed by the L2 guest or the exit - i.e., the guest-state and
+ * exit-information fields only. Other fields are modified by L1 with VMWRITE,
+ * which already writes to vmcs12 directly.
+ */
+static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+                           u32 exit_reason, u32 exit_intr_info,
+                           unsigned long exit_qualification)
+{
+        /* update guest state fields: */
+        sync_vmcs12(vcpu, vmcs12);
 
         /* update exit information fields: */