Commit 6f8e65a6 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Add helper to generate mask of reserved HPA bits

Add a helper to generate the mask of reserved PA bits in the host.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210204000117.3303214-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5b7f575c
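
For context, the new helper is a thin wrapper around rsvd_bits(): it marks every physical-address bit above the host's MAXPHYADDR (shadow_phys_bits) as reserved. Below is a minimal stand-alone sketch, not KVM code, that reproduces the resulting mask; rsvd_bits() here re-implements the core formula of KVM's helper of the same name, and the 46-bit address width is an assumed example value rather than the host's actual shadow_phys_bits.

/* Stand-alone illustration of the mask reserved_hpa_bits() produces. */
#include <stdio.h>
#include <stdint.h>

/* Contiguous mask covering bits s..e, inclusive (core formula of KVM's rsvd_bits()). */
static inline uint64_t rsvd_bits(int s, int e)
{
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	int shadow_phys_bits = 46;	/* assumed host MAXPHYADDR for this example */

	/* Equivalent of reserved_hpa_bits(): everything above the host PA width. */
	uint64_t mask = rsvd_bits(shadow_phys_bits, 63);

	/* Prints 0xffffc00000000000 for a 46-bit host. */
	printf("reserved HPA bits: 0x%016llx\n", (unsigned long long)mask);
	return 0;
}
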
@@ -4123,6 +4123,11 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 				    vcpu->arch.reserved_gpa_bits, execonly);
 }
 
+static inline u64 reserved_hpa_bits(void)
+{
+	return rsvd_bits(shadow_phys_bits, 63);
+}
+
 /*
  * the page table on host is the shadow page table for the page
  * table in guest or amd nested guest, its mmu features completely
@@ -4142,7 +4147,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 	 */
 	shadow_zero_check = &context->shadow_zero_check;
 	__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
-				rsvd_bits(shadow_phys_bits, 63),
+				reserved_hpa_bits(),
 				context->shadow_root_level, uses_nx,
 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
 				is_pse(vcpu), true);
@@ -4179,14 +4184,13 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 
 	if (boot_cpu_is_amd())
 		__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
-					rsvd_bits(shadow_phys_bits, 63),
+					reserved_hpa_bits(),
 					context->shadow_root_level, false,
 					boot_cpu_has(X86_FEATURE_GBPAGES),
 					true, true);
 	else
 		__reset_rsvds_bits_mask_ept(shadow_zero_check,
-					    rsvd_bits(shadow_phys_bits, 63),
-					    false);
+					    reserved_hpa_bits(), false);
 
 	if (!shadow_me_mask)
 		return;
@@ -4206,7 +4210,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 				struct kvm_mmu *context, bool execonly)
 {
 	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
-				    rsvd_bits(shadow_phys_bits, 63), execonly);
+				    reserved_hpa_bits(), execonly);
 }
 
 #define BYTE_MASK(access) \