Commit d468d94b authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Dynamically calculate TDP level from max level and MAXPHYADDR

Calculate the desired TDP level on the fly using the max TDP level and
MAXPHYADDR, instead of computing and caching it when CPUID is updated.
This avoids the hidden dependency on cpuid_maxphyaddr() in
vmx_get_tdp_level() and also standardizes the "use 5-level paging iff
MAXPHYADDR > 48" behavior across x86.
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200716034122.5998-8-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f83a4a69
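
Not part of the patch itself, but as a quick illustration of the "use 5-level
paging iff MAXPHYADDR > 48" rule that the new kvm_mmu_get_tdp_level() helper
below applies on the fly, here is a minimal standalone sketch; the helper name
and parameters are purely illustrative:

static int example_tdp_level(int max_tdp_level, int guest_maxphyaddr)
{
        /*
         * A 5th paging level only pays off when guest physical addresses
         * need more than 48 bits; otherwise it just adds walk overhead.
         */
        if (max_tdp_level == 5 && guest_maxphyaddr <= 48)
                return 4;

        return max_tdp_level;
}

For example, with 5-level EPT available (max level 5), a guest with
MAXPHYADDR = 46 still runs with 4-level TDP, while a guest with
MAXPHYADDR = 52 gets 5-level TDP.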
@@ -639,7 +639,7 @@ struct kvm_vcpu_arch {
 	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
 	int maxphyaddr;
-	int tdp_level;
+	int max_tdp_level;
 	/* emulate context */
@@ -1133,7 +1133,7 @@ struct kvm_x86_ops {
 	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
-	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
+	int (*get_max_tdp_level)(void);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long pgd,
@@ -140,9 +140,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.guest_supported_xcr0 =
 		(best->eax | ((u64)best->edx << 32)) & supported_xcr0;
-	/* Note, maxphyaddr must be updated before tdp_level. */
 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
-	vcpu->arch.tdp_level = kvm_x86_ops.get_tdp_level(vcpu);
 	kvm_mmu_reset_context(vcpu);
 	kvm_pmu_refresh(vcpu);
@@ -4846,13 +4846,22 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
 	return role;
 }
+static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
+{
+	/* Use 5-level TDP if and only if it's useful/necessary. */
+	if (vcpu->arch.max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
+		return 4;
+
+	return vcpu->arch.max_tdp_level;
+}
 static union kvm_mmu_role
 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 {
 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
 	role.base.ad_disabled = (shadow_accessed_mask == 0);
-	role.base.level = vcpu->arch.tdp_level;
+	role.base.level = kvm_mmu_get_tdp_level(vcpu);
 	role.base.direct = true;
 	role.base.gpte_is_8_bytes = true;
@@ -4873,7 +4882,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
 	context->update_pte = nonpaging_update_pte;
-	context->shadow_root_level = vcpu->arch.tdp_level;
+	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
 	context->direct_map = true;
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
@@ -4973,7 +4982,7 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
 		kvm_calc_shadow_root_page_role_common(vcpu, false);
 	role.base.direct = false;
-	role.base.level = vcpu->arch.tdp_level;
+	role.base.level = kvm_mmu_get_tdp_level(vcpu);
 	return role;
 }
@@ -5683,7 +5692,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
 	 * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
 	 * skip allocating the PDP table.
 	 */
-	if (tdp_enabled && vcpu->arch.tdp_level > PT32E_ROOT_LEVEL)
+	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
 		return 0;
 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
@@ -254,7 +254,7 @@ static inline void invlpga(unsigned long addr, u32 asid)
 	asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
 }
-static int get_npt_level(struct kvm_vcpu *vcpu)
+static int get_max_npt_level(void)
 {
 #ifdef CONFIG_X86_64
 	return PT64_ROOT_4LEVEL;
@@ -4109,7 +4109,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.set_tss_addr = svm_set_tss_addr,
 	.set_identity_map_addr = svm_set_identity_map_addr,
-	.get_tdp_level = get_npt_level,
+	.get_max_tdp_level = get_max_npt_level,
 	.get_mt_mask = svm_get_mt_mask,
 	.get_exit_info = svm_get_exit_info,
@@ -3065,9 +3065,9 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	vmx->emulation_required = emulation_required(vcpu);
 }
-static int vmx_get_tdp_level(struct kvm_vcpu *vcpu)
+static int vmx_get_max_tdp_level(void)
 {
-	if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
+	if (cpu_has_vmx_ept_5levels())
 		return 5;
 	return 4;
 }
@@ -7959,7 +7959,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_tss_addr = vmx_set_tss_addr,
 	.set_identity_map_addr = vmx_set_identity_map_addr,
-	.get_tdp_level = vmx_get_tdp_level,
+	.get_max_tdp_level = vmx_get_max_tdp_level,
 	.get_mt_mask = vmx_get_mt_mask,
 	.get_exit_info = vmx_get_exit_info,
@@ -9520,7 +9520,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	fx_init(vcpu);
 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
-	vcpu->arch.tdp_level = kvm_x86_ops.get_tdp_level(vcpu);
+	vcpu->arch.max_tdp_level = kvm_x86_ops.get_max_tdp_level();
 	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;