Commit b18d5431 authored by Xiao Guangrong, committed by Paolo Bonzini

KVM: x86: fix CR0.CD virtualization

Currently, CR0.CD is not checked when we virtualize memory cache type for
noncoherent_dma guests. This patch fixes it by:

- setting UC for all memory if CR0.CD = 1
- zapping all the last sptes in MMU if CR0.CD is changed
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f104765b
...@@ -8628,7 +8628,8 @@ static int get_ept_level(void) ...@@ -8628,7 +8628,8 @@ static int get_ept_level(void)
static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{ {
u64 ret; u8 cache;
u64 ipat = 0;
/* For VT-d and EPT combination /* For VT-d and EPT combination
* 1. MMIO: always map as UC * 1. MMIO: always map as UC
...@@ -8641,16 +8642,27 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) ...@@ -8641,16 +8642,27 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
* 3. EPT without VT-d: always map as WB and set IPAT=1 to keep * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
* consistent with host MTRR * consistent with host MTRR
*/ */
if (is_mmio) if (is_mmio) {
ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT; cache = MTRR_TYPE_UNCACHABLE;
else if (kvm_arch_has_noncoherent_dma(vcpu->kvm)) goto exit;
ret = kvm_get_guest_memory_type(vcpu, gfn) << }
VMX_EPT_MT_EPTE_SHIFT;
else
ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
| VMX_EPT_IPAT_BIT;
return ret; if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
ipat = VMX_EPT_IPAT_BIT;
cache = MTRR_TYPE_WRBACK;
goto exit;
}
if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
ipat = VMX_EPT_IPAT_BIT;
cache = MTRR_TYPE_UNCACHABLE;
goto exit;
}
cache = kvm_get_guest_memory_type(vcpu, gfn);
exit:
return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
} }
static int vmx_get_lpage_level(void) static int vmx_get_lpage_level(void)
......
...@@ -621,6 +621,10 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) ...@@ -621,6 +621,10 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
if ((cr0 ^ old_cr0) & update_bits) if ((cr0 ^ old_cr0) & update_bits)
kvm_mmu_reset_context(vcpu); kvm_mmu_reset_context(vcpu);
if ((cr0 ^ old_cr0) & X86_CR0_CD)
kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(kvm_set_cr0); EXPORT_SYMBOL_GPL(kvm_set_cr0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment