Commit b222b0b8 authored by Maxim Levitsky's avatar Maxim Levitsky Committed by Paolo Bonzini

KVM: nSVM: refactor the CR3 reload on migration

Document the actual reason why we need to do it
on migration and move the call to svm_set_nested_state
to be closer to VMX code.

To avoid loading the PDPTRs from a possibly out-of-date memory map
in nested_svm_load_cr3 after the move, move this code to
.get_nested_state_pages instead.
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210607090203.133058-5-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c7313155
...@@ -385,12 +385,12 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm) ...@@ -385,12 +385,12 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
* if we are emulating VM-Entry into a guest with NPT enabled. * if we are emulating VM-Entry into a guest with NPT enabled.
*/ */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
bool nested_npt) bool nested_npt, bool reload_pdptrs)
{ {
if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
return -EINVAL; return -EINVAL;
if (!nested_npt && is_pae_paging(vcpu) && if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
return -EINVAL; return -EINVAL;
...@@ -574,7 +574,7 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, ...@@ -574,7 +574,7 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
nested_vmcb02_prepare_save(svm, vmcb12); nested_vmcb02_prepare_save(svm, vmcb12);
ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3, ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
nested_npt_enabled(svm)); nested_npt_enabled(svm), true);
if (ret) if (ret)
return ret; return ret;
...@@ -801,7 +801,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm) ...@@ -801,7 +801,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
nested_svm_uninit_mmu_context(vcpu); nested_svm_uninit_mmu_context(vcpu);
rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false); rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
if (rc) if (rc)
return 1; return 1;
...@@ -1297,6 +1297,19 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, ...@@ -1297,6 +1297,19 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
!nested_vmcb_valid_sregs(vcpu, save)) !nested_vmcb_valid_sregs(vcpu, save))
goto out_free; goto out_free;
/*
* While the nested guest CR3 is already checked and set by
* KVM_SET_SREGS, it was set when the nested state was not yet loaded,
* thus MMU might not be initialized correctly.
* Set it again to fix this.
*/
ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
nested_npt_enabled(svm), false);
if (WARN_ON_ONCE(ret))
goto out_free;
/* /*
* All checks done, we can enter guest mode. Userspace provides * All checks done, we can enter guest mode. Userspace provides
* vmcb12.control, which will be combined with L1 and stored into * vmcb12.control, which will be combined with L1 and stored into
...@@ -1354,8 +1367,13 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu) ...@@ -1354,8 +1367,13 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
if (WARN_ON(!is_guest_mode(vcpu))) if (WARN_ON(!is_guest_mode(vcpu)))
return true; return true;
if (nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3, if (!nested_npt_enabled(svm) && is_pae_paging(vcpu))
nested_npt_enabled(svm))) /*
* Reload the guest's PDPTRs since after a migration
* the guest CR3 might be restored prior to setting the nested
* state which can lead to a load of wrong PDPTRs.
*/
if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
return false; return false;
if (!nested_svm_vmrun_msrpm(svm)) { if (!nested_svm_vmrun_msrpm(svm)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment