Commit 929d1cfa authored by Paolo Bonzini

KVM: MMU: pass arbitrary CR0/CR4/EFER to kvm_init_shadow_mmu

This allows fetching the registers from the hsave area when setting
up the NPT shadow MMU, and is needed for KVM_SET_NESTED_STATE (which
runs long after the CR0, CR4 and EFER values in vcpu have been switched
to hold L2 guest state).
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c513f484
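
For context before the diff: with this change the shadow-MMU setup chooses a paging mode purely from the cr0/cr4/efer values it is handed, instead of reading the vCPU's current (possibly L2) registers. A minimal standalone sketch of that decision tree follows; the bit positions are architectural, but the enum, helper, and main() are illustrative stand-ins, not KVM code.

/*
 * Illustration only: the same CR0.PG -> EFER.LMA -> CR4.PAE decision
 * tree that kvm_init_shadow_mmu() applies to its explicit arguments.
 * Bit positions are architectural; everything else is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define X86_CR0_PG  (1ULL << 31)   /* CR0.PG: paging enabled */
#define X86_CR4_PAE (1ULL << 5)    /* CR4.PAE: physical address extension */
#define EFER_LMA    (1ULL << 10)   /* EFER.LMA: long mode active */

enum paging_mode { NONPAGING, PAGING64, PAGING32E, PAGING32 };

static enum paging_mode shadow_paging_mode(uint64_t cr0, uint64_t cr4,
					   uint64_t efer)
{
	if (!(cr0 & X86_CR0_PG))
		return NONPAGING;	/* guest has paging off */
	else if (efer & EFER_LMA)
		return PAGING64;	/* 4-level long-mode paging */
	else if (cr4 & X86_CR4_PAE)
		return PAGING32E;	/* 32-bit PAE paging */
	else
		return PAGING32;	/* classic 32-bit paging */
}

int main(void)
{
	/* e.g. L1 state taken from hsave: paging on, long mode active */
	printf("%d\n", shadow_paging_mode(X86_CR0_PG, 0, EFER_LMA));
	return 0;
}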
@@ -57,7 +57,7 @@ void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

 void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
@@ -4952,7 +4952,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 	return role;
 }

-void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
+void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;
 	union kvm_mmu_role new_role =
@@ -4961,11 +4961,11 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;

-	if (!is_paging(vcpu))
+	if (!(cr0 & X86_CR0_PG))
 		nonpaging_init_context(vcpu, context);
-	else if (is_long_mode(vcpu))
+	else if (efer & EFER_LMA)
 		paging64_init_context(vcpu, context);
-	else if (is_pae(vcpu))
+	else if (cr4 & X86_CR4_PAE)
 		paging32E_init_context(vcpu, context);
 	else
 		paging32_init_context(vcpu, context);
@@ -5043,7 +5043,11 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = vcpu->arch.mmu;

-	kvm_init_shadow_mmu(vcpu);
+	kvm_init_shadow_mmu(vcpu,
+			    kvm_read_cr0_bits(vcpu, X86_CR0_PG),
+			    kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
+			    vcpu->arch.efer);
+
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
@@ -80,10 +80,13 @@ static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)

 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *hsave = svm->nested.hsave;
+
 	WARN_ON(mmu_is_nested(vcpu));

 	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
-	kvm_init_shadow_mmu(vcpu);
+	kvm_init_shadow_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer);
 	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
 	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
 	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;