Commit f241d711 authored by Paolo Bonzini

KVM: nSVM: extract preparation of VMCB for nested run

Split out filling svm->vmcb.save and svm->vmcb.control before VMRUN.
Only the latter will be useful when restoring nested SVM state.

This patch introduces no semantic change, so the MMU setup is still
done in nested_prepare_vmcb_save.  The next patch will clean up things.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3e06f016
@@ -245,21 +245,8 @@ static void load_nested_vmcb_control(struct vcpu_svm *svm,
 	svm->vcpu.arch.tsc_offset += control->tsc_offset;
 }
 
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-			  struct vmcb *nested_vmcb)
+static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
 {
-	bool evaluate_pending_interrupts =
-		is_intercept(svm, INTERCEPT_VINTR) ||
-		is_intercept(svm, INTERCEPT_IRET);
-
-	svm->nested.vmcb = vmcb_gpa;
-	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
-		svm->vcpu.arch.hflags |= HF_HIF_MASK;
-	else
-		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
-
-	load_nested_vmcb_control(svm, &nested_vmcb->control);
-
 	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
 		nested_svm_init_mmu_context(&svm->vcpu);
 
@@ -291,7 +278,10 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
 	svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
 	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
+}
 
+static void nested_prepare_vmcb_control(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
+{
 	svm_flush_tlb(&svm->vcpu);
 	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
 		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -321,6 +311,26 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	 */
 	recalc_intercepts(svm);
 
+	mark_all_dirty(svm->vmcb);
+}
+
+void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+			  struct vmcb *nested_vmcb)
+{
+	bool evaluate_pending_interrupts =
+		is_intercept(svm, INTERCEPT_VINTR) ||
+		is_intercept(svm, INTERCEPT_IRET);
+
+	svm->nested.vmcb = vmcb_gpa;
+	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
+		svm->vcpu.arch.hflags |= HF_HIF_MASK;
+	else
+		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
+
+	load_nested_vmcb_control(svm, &nested_vmcb->control);
+	nested_prepare_vmcb_save(svm, nested_vmcb);
+	nested_prepare_vmcb_control(svm, nested_vmcb);
+
 	/*
 	 * If L1 had a pending IRQ/NMI before executing VMRUN,
 	 * which wasn't delivered because it was disallowed (e.g.
@@ -336,8 +346,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	enable_gif(svm);
 	if (unlikely(evaluate_pending_interrupts))
 		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
-
-	mark_all_dirty(svm->vmcb);
 }
 
 int nested_svm_vmrun(struct vcpu_svm *svm)
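For context on the commit message's point that "only the latter will be useful when restoring nested SVM state", here is a minimal sketch of how a userspace-driven nested-state restore path could reuse just the control half of this split. Only load_nested_vmcb_control(), nested_prepare_vmcb_control() and the svm->nested.vmcb field come from this patch; the entry point example_set_nested_state() and its arguments are illustrative assumptions, not part of the series.

/*
 * Illustrative sketch only: restoring nested state supplied by userspace.
 * The entry point and its signature are hypothetical; the two helper
 * calls are the ones carved out by this patch.
 */
static int example_set_nested_state(struct vcpu_svm *svm,
				    struct vmcb *saved_vmcb, u64 vmcb_gpa)
{
	svm->nested.vmcb = vmcb_gpa;

	/* Replay the control-state setup that a guest VMRUN would do... */
	load_nested_vmcb_control(svm, &saved_vmcb->control);
	nested_prepare_vmcb_control(svm, saved_vmcb);

	/*
	 * ...but skip nested_prepare_vmcb_save(): on restore, the L2
	 * register state comes from the state userspace provides, not
	 * from re-reading the guest's VMCB as VMRUN does.
	 */
	return 0;
}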