Commit e93fd3b3 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Capture TDP level when updating CPUID

Snapshot the TDP level now that it's invariant (SVM) or dependent only
on host capabilities and guest CPUID (VMX).  This avoids having to call
kvm_x86_ops.get_tdp_level() when initializing a TDP MMU and/or
calculating the page role, and thus avoids the associated retpoline.
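
The pattern at work is a snapshot cache: compute the level once through the
indirect kvm_x86_ops callback when its inputs (guest CPUID) change, and let
the hot paths read a plain field. A minimal sketch of that pattern, using
hypothetical stand-in types rather than the real KVM structures:

	struct vcpu;

	struct ops {
		/* Indirect call; pays a retpoline on mitigated hosts. */
		int (*get_tdp_level)(struct vcpu *vcpu);
	};

	struct vcpu {
		int tdp_level;	/* snapshot, valid until the next CPUID update */
	};

	/* Slow path: refresh the snapshot whenever guest CPUID changes. */
	static void update_cpuid(struct vcpu *vcpu, const struct ops *ops)
	{
		vcpu->tdp_level = ops->get_tdp_level(vcpu);
	}

	/* Hot paths (MMU init, page-role calculation): a direct load,
	 * no indirect branch.
	 */
	static int root_level(struct vcpu *vcpu)
	{
		return vcpu->tdp_level;
	}

The cost is an ordering dependency: anything the callback reads (on VMX, the
guest's maxphyaddr) must be refreshed first, which is what the comment added
in cpuid.c below calls out.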

Drop the WARN in vmx_get_tdp_level() as updating CPUID while L2 is
active is legal, if dodgy.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200502043234.12481-11-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0047fcad
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -687,6 +687,7 @@ struct kvm_vcpu_arch {
 	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
 
 	int maxphyaddr;
+	int tdp_level;
 
 	/* emulate context */
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -124,8 +124,9 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 					   MSR_IA32_MISC_ENABLE_MWAIT);
 	}
 
-	/* Update physical-address width */
+	/* Note, maxphyaddr must be updated before tdp_level. */
 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+	vcpu->arch.tdp_level = kvm_x86_ops.get_tdp_level(vcpu);
 	kvm_mmu_reset_context(vcpu);
 
 	kvm_pmu_refresh(vcpu);
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4894,7 +4894,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
 
 	role.base.ad_disabled = (shadow_accessed_mask == 0);
-	role.base.level = kvm_x86_ops.get_tdp_level(vcpu);
+	role.base.level = vcpu->arch.tdp_level;
 	role.base.direct = true;
 	role.base.gpte_is_8_bytes = true;
@@ -4915,7 +4915,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
 	context->update_pte = nonpaging_update_pte;
-	context->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu);
+	context->shadow_root_level = vcpu->arch.tdp_level;
 	context->direct_map = true;
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
@@ -5680,7 +5680,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
 	 * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
 	 * skip allocating the PDP table.
 	 */
-	if (tdp_enabled && kvm_x86_ops.get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+	if (tdp_enabled && vcpu->arch.tdp_level > PT32E_ROOT_LEVEL)
 		return 0;
 
 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -86,7 +86,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
 	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
 	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
-	vcpu->arch.mmu->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu);
+	vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
 	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
 	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
 }
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3025,8 +3025,6 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 static int vmx_get_tdp_level(struct kvm_vcpu *vcpu)
 {
-	WARN_ON(is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)));
-
 	if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
 		return 5;
 	return 4;