Commit 84ea5c09 authored by Lai Jiangshan, committed by Paolo Bonzini

KVM: X86: Add huge_page_level to __reset_rsvds_bits_mask_ept()

Whether bit 7 of a paging-structure entry is reserved depends on the largest huge page level that is supported, so pass that level down to __reset_rsvds_bits_mask_ept().
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20211124122055.64424-7-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c59a0f57
...@@ -4358,22 +4358,28 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, ...@@ -4358,22 +4358,28 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
static void static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
u64 pa_bits_rsvd, bool execonly) u64 pa_bits_rsvd, bool execonly, int huge_page_level)
{ {
u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
u64 bad_mt_xwr; u64 bad_mt_xwr;
if (huge_page_level < PG_LEVEL_1G)
large_1g_rsvd = rsvd_bits(7, 7);
if (huge_page_level < PG_LEVEL_2M)
large_2m_rsvd = rsvd_bits(7, 7);
rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7); rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7); rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6); rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6); rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd; rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
/* large page */ /* large page */
rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29); rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20); rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */
...@@ -4389,10 +4395,11 @@ __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, ...@@ -4389,10 +4395,11 @@ __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
} }
/*
 * Compute the reserved-bit masks used to validate the guest's own EPT
 * page-table entries (context->guest_rsvd_check).
 *
 * @vcpu:            vCPU whose reserved GPA bits bound the valid address bits
 * @context:         MMU context whose guest_rsvd_check is (re)initialized
 * @execonly:        true if execute-only EPT mappings are permitted
 * @huge_page_level: largest huge page level allowed; bit 7 of a PDE/PDPTE
 *                   is treated as reserved for any larger page size
 *                   (see __reset_rsvds_bits_mask_ept()).
 */
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *context, bool execonly,
				      int huge_page_level)
{
	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
				    vcpu->arch.reserved_gpa_bits, execonly,
				    huge_page_level);
}
static inline u64 reserved_hpa_bits(void) static inline u64 reserved_hpa_bits(void)
...@@ -4468,7 +4475,8 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, ...@@ -4468,7 +4475,8 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
false, true); false, true);
else else
__reset_rsvds_bits_mask_ept(shadow_zero_check, __reset_rsvds_bits_mask_ept(shadow_zero_check,
reserved_hpa_bits(), false); reserved_hpa_bits(), false,
max_huge_page_level);
if (!shadow_me_mask) if (!shadow_me_mask)
return; return;
...@@ -4488,7 +4496,8 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, ...@@ -4488,7 +4496,8 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context, bool execonly) struct kvm_mmu *context, bool execonly)
{ {
__reset_rsvds_bits_mask_ept(&context->shadow_zero_check, __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
reserved_hpa_bits(), execonly); reserved_hpa_bits(), execonly,
max_huge_page_level);
} }
#define BYTE_MASK(access) \ #define BYTE_MASK(access) \
...@@ -4923,7 +4932,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, ...@@ -4923,7 +4932,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
update_permission_bitmask(context, true); update_permission_bitmask(context, true);
context->pkru_mask = 0; context->pkru_mask = 0;
reset_rsvds_bits_mask_ept(vcpu, context, execonly); reset_rsvds_bits_mask_ept(vcpu, context, execonly, max_huge_page_level);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
} }
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment