Commit ea2800dd authored by Brijesh Singh, committed by Ingo Molnar

kvm/x86: Avoid clearing the C-bit in rsvd_bits()

The following commit:

  d0ec49d4 ("kvm/x86/svm: Support Secure Memory Encryption within KVM")

uses __sme_clr() to remove the C-bit in rsvd_bits(). rsvd_bits() is
just a simple helper that returns a contiguous range of 1 bits; applying
a mask based on properties of the host MMU there is incorrect.
Additionally, the masks computed by __reset_rsvds_bits_mask() also apply
to guest page tables, where the C-bit is reserved since we don't emulate SME.

The fix is to clear the C-bit from the rsvd_bits_mask array after it has
been populated by __reset_rsvds_bits_mask().
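
To illustrate the idea, here is a minimal standalone sketch (not KVM code;
phys_bits and me_mask are made-up stand-ins for boot_cpu_data.x86_phys_bits
and sme_me_mask): rsvd_bits() stays a pure "set bits s..e" helper, and the
encryption bit is stripped from the already-computed reserved-bits mask only
on the host/shadow side, where the C-bit is actually usable.

#include <stdint.h>
#include <stdio.h>

/* Pure helper: return a mask with bits s..e (inclusive) set, nothing else. */
static inline uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	/* Made-up stand-ins for boot_cpu_data.x86_phys_bits and sme_me_mask. */
	int phys_bits = 43;
	uint64_t me_mask = 1ULL << 47;	/* hypothetical C-bit position */

	/* Physical-address bits above the supported width (up to bit 51) are reserved. */
	uint64_t rsvd = rsvd_bits(phys_bits, 51);

	/*
	 * Guest page tables keep the C-bit reserved (SME is not emulated),
	 * so the raw mask is used as-is.  For the host/shadow masks the
	 * C-bit is legal, so it is cleared after the mask has been built.
	 */
	uint64_t shadow_rsvd = rsvd & ~me_mask;

	printf("guest rsvd bits:  %#018llx\n", (unsigned long long)rsvd);
	printf("shadow rsvd bits: %#018llx\n", (unsigned long long)shadow_rsvd);
	return 0;
}
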
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: kvm@vger.kernel.org
Cc: paolo.bonzini@gmail.com
Fixes: d0ec49d4 ("kvm/x86/svm: Support Secure Memory Encryption within KVM")
Link: http://lkml.kernel.org/r/20170825205540.123531-1-brijesh.singh@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 413d63d7
@@ -4109,16 +4109,28 @@ void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
 	bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+	struct rsvd_bits_validate *shadow_zero_check;
+	int i;
 
 	/*
 	 * Passing "true" to the last argument is okay; it adds a check
 	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
 	 */
-	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+	shadow_zero_check = &context->shadow_zero_check;
+
+	__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
 				context->shadow_root_level, uses_nx,
 				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
 				true);
+
+	if (!shadow_me_mask)
+		return;
+
+	for (i = context->shadow_root_level; --i >= 0;) {
+		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
+		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
+	}
 }
 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
@@ -4136,17 +4148,29 @@ static void
 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 				struct kvm_mmu *context)
 {
+	struct rsvd_bits_validate *shadow_zero_check;
+	int i;
+
+	shadow_zero_check = &context->shadow_zero_check;
+
 	if (boot_cpu_is_amd())
-		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+		__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
 					boot_cpu_data.x86_phys_bits,
 					context->shadow_root_level, false,
 					boot_cpu_has(X86_FEATURE_GBPAGES),
 					true, true);
 	else
-		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+		__reset_rsvds_bits_mask_ept(shadow_zero_check,
 					    boot_cpu_data.x86_phys_bits,
 					    false);
+
+	if (!shadow_me_mask)
+		return;
+
+	for (i = context->shadow_root_level; --i >= 0;) {
+		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
+		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
+	}
 }
 
 /*
...
@@ -48,7 +48,7 @@
 
 static inline u64 rsvd_bits(int s, int e)
 {
-	return __sme_clr(((1ULL << (e - s + 1)) - 1) << s);
+	return ((1ULL << (e - s + 1)) - 1) << s;
 }
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);
...