Commit f0f59d06 authored by Paolo Bonzini

Merge tag 'kvm-x86-mmu-6.7' of https://github.com/kvm-x86/linux into HEAD

KVM x86 MMU changes for 6.7:

 - Clean up code that deals with honoring guest MTRRs when the VM has
   non-coherent DMA and host MTRRs are ignored, i.e. EPT is enabled.

 - Zap EPT entries when non-coherent DMA assignment stops/starts to prevent
   using stale entries with the wrong memtype.

 - Don't ignore guest PAT for CR0.CD=1 && KVM_X86_QUIRK_CD_NW_CLEARED=y, as
   there's zero reason to ignore guest PAT if the effective MTRR memtype is WB.
   This will also allow for future optimizations of handling guest MTRR updates
   for VMs with non-coherent DMA and the quirk enabled.

 - Harden the fast page fault path to guard against encountering an invalid
   root when walking SPTEs.
parents f292dc8a 1de9992f
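For orientation before the diff: the series keys most of these changes off a single predicate, "does KVM honor guest MTRRs", which is true only when TDP/EPT supplies a memtype mask to enforce (shadow_memtype_mask is non-zero) and the VM has non-coherent DMA. Below is a minimal standalone sketch of that predicate, mirroring the helpers added in the diff; the shadow_memtype_mask value here is a stand-in, not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in: non-zero when TDP/EPT provides a memtype mask to enforce. */
static uint64_t shadow_memtype_mask = 1;

/* Mirrors __kvm_mmu_honors_guest_mtrrs() from the diff below. */
static bool honors_guest_mtrrs(bool vm_has_noncoherent_dma)
{
        return vm_has_noncoherent_dma && shadow_memtype_mask;
}

int main(void)
{
        printf("EPT + non-coherent DMA -> %d\n", honors_guest_mtrrs(true));
        printf("EPT, coherent DMA only -> %d\n", honors_guest_mtrrs(false));

        shadow_memtype_mask = 0;        /* e.g. shadow paging; host MTRRs apply */
        printf("no EPT memtype mask    -> %d\n", honors_guest_mtrrs(true));
        return 0;
}

The same predicate, evaluated with true, is what the DMA (un)registration paths at the end of the diff use to decide whether any zapping is needed at all.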
@@ -237,6 +237,13 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return -(u32)fault & errcode;
 }
 
+bool __kvm_mmu_honors_guest_mtrrs(bool vm_has_noncoherent_dma);
+
+static inline bool kvm_mmu_honors_guest_mtrrs(struct kvm *kvm)
+{
+	return __kvm_mmu_honors_guest_mtrrs(kvm_arch_has_noncoherent_dma(kvm));
+}
+
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
 
 int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
...
@@ -3425,8 +3425,8 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct kvm_mmu_page *sp;
 	int ret = RET_PF_INVALID;
-	u64 spte = 0ull;
-	u64 *sptep = NULL;
+	u64 spte;
+	u64 *sptep;
 	uint retry_count = 0;
 
 	if (!page_fault_can_be_fast(fault))
@@ -3442,6 +3442,14 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		else
 			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
 
+		/*
+		 * It's entirely possible for the mapping to have been zapped
+		 * by a different task, but the root page should always be
+		 * available as the vCPU holds a reference to its root(s).
+		 */
+		if (WARN_ON_ONCE(!sptep))
+			spte = REMOVED_SPTE;
+
 		if (!is_shadow_present_pte(spte))
 			break;
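Note on the hunk above: REMOVED_SPTE is a non-present marker, so substituting it when the walk fails to return an sptep means the existing !is_shadow_present_pte() check immediately breaks out of the retry loop and the fast path bails with RET_PF_INVALID, without ever dereferencing the NULL pointer; the WARN_ON_ONCE only flags the supposedly impossible invalid-root case.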
@@ -4479,21 +4487,28 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
 }
 #endif
 
-int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+bool __kvm_mmu_honors_guest_mtrrs(bool vm_has_noncoherent_dma)
 {
 	/*
-	 * If the guest's MTRRs may be used to compute the "real" memtype,
-	 * restrict the mapping level to ensure KVM uses a consistent memtype
-	 * across the entire mapping.  If the host MTRRs are ignored by TDP
-	 * (shadow_memtype_mask is non-zero), and the VM has non-coherent DMA
-	 * (DMA doesn't snoop CPU caches), KVM's ABI is to honor the memtype
-	 * from the guest's MTRRs so that guest accesses to memory that is
-	 * DMA'd aren't cached against the guest's wishes.
+	 * If host MTRRs are ignored (shadow_memtype_mask is non-zero), and the
+	 * VM has non-coherent DMA (DMA doesn't snoop CPU caches), KVM's ABI is
+	 * to honor the memtype from the guest's MTRRs so that guest accesses
+	 * to memory that is DMA'd aren't cached against the guest's wishes.
 	 *
 	 * Note, KVM may still ultimately ignore guest MTRRs for certain PFNs,
 	 * e.g. KVM will force UC memtype for host MMIO.
 	 */
-	if (shadow_memtype_mask && kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+	return vm_has_noncoherent_dma && shadow_memtype_mask;
+}
+
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+{
+	/*
+	 * If the guest's MTRRs may be used to compute the "real" memtype,
+	 * restrict the mapping level to ensure KVM uses a consistent memtype
+	 * across the entire mapping.
+	 */
+	if (kvm_mmu_honors_guest_mtrrs(vcpu->kvm)) {
 		for ( ; fault->max_level > PG_LEVEL_4K; --fault->max_level) {
 			int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
 			gfn_t base = gfn_round_for_level(fault->gfn,
...
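To make the level restriction in kvm_tdp_page_fault() above concrete: for each candidate mapping level the loop computes how many 4K pages the mapping would span and which gfn it would start at, so the (elided) MTRR consistency check can verify that a single memtype covers the whole range before a huge page is used. A standalone sketch of that arithmetic follows, using the usual x86 values (9 gfn bits per level above 4K) as an assumption rather than quoting kernel headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed x86 paging layout: each level above 4K covers 9 more gfn bits. */
#define PG_LEVEL_4K     1
#define PG_LEVEL_2M     2
#define PG_LEVEL_1G     3
#define PAGES_PER_HPAGE(level)  (1ULL << (((level) - PG_LEVEL_4K) * 9))
#define GFN_ROUND(gfn, level)   ((gfn) & ~(PAGES_PER_HPAGE(level) - 1))

int main(void)
{
        uint64_t gfn = 0x12345;         /* example faulting guest frame */

        for (int level = PG_LEVEL_1G; level > PG_LEVEL_4K; --level) {
                uint64_t page_num = PAGES_PER_HPAGE(level);
                uint64_t base = GFN_ROUND(gfn, level);

                /*
                 * The real code asks the guest MTRR state whether the range
                 * [base, base + page_num) has one consistent memtype and
                 * drops to the next smaller level if it does not.
                 */
                printf("level %d: base gfn 0x%llx spans %llu 4K pages\n",
                       level, (unsigned long long)base,
                       (unsigned long long)page_num);
        }
        return 0;
}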
@@ -320,7 +320,7 @@ static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
 	gfn_t start, end;
 
-	if (!tdp_enabled || !kvm_arch_has_noncoherent_dma(vcpu->kvm))
+	if (!kvm_mmu_honors_guest_mtrrs(vcpu->kvm))
 		return;
 
 	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
...
@@ -7579,8 +7579,6 @@ static int vmx_vm_init(struct kvm *kvm)
 
 static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
-	u8 cache;
-
 	/* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
 	 * memory aliases with conflicting memory types and sometimes MCEs.
 	 * We have to be careful as to what are honored and when.
@@ -7607,11 +7605,10 @@ static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 	if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
 		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
-			cache = MTRR_TYPE_WRBACK;
+			return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
 		else
-			cache = MTRR_TYPE_UNCACHABLE;
-
-		return (cache << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
+			return (MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT) |
+			       VMX_EPT_IPAT_BIT;
 	}
 
 	return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
...
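The CR0.CD=1 change above is easier to follow with the EPT leaf layout in mind: the memory type lives in bits 5:3 and the "ignore PAT" (IPAT) flag is bit 6, so returning WB without IPAT (quirk enabled) lets the guest's PAT still factor into the effective memtype, while returning UC with IPAT (quirk disabled) forces UC outright. Below is a standalone sketch of the two encodings; the bit positions and memory type values are assumptions taken from the VMX EPT format, not quoted from this diff.

#include <stdint.h>
#include <stdio.h>

/* Assumed VMX EPT leaf layout: memtype in bits 5:3, ignore-PAT in bit 6. */
#define EPT_MT_SHIFT    3
#define EPT_IPAT_BIT    (1ULL << 6)

/* Assumed x86 memory type encodings. */
#define MT_UNCACHABLE   0ULL
#define MT_WRBACK       6ULL

/* Mirrors the CR0.CD=1 branch of vmx_get_mt_mask() after this change. */
static uint64_t cd_memtype(int quirk_cd_nw_cleared)
{
        if (quirk_cd_nw_cleared)
                return MT_WRBACK << EPT_MT_SHIFT;       /* WB, guest PAT honored */

        return (MT_UNCACHABLE << EPT_MT_SHIFT) | EPT_IPAT_BIT;  /* UC, PAT ignored */
}

int main(void)
{
        printf("quirk enabled : 0x%02llx (WB, IPAT clear)\n",
               (unsigned long long)cd_memtype(1));
        printf("quirk disabled: 0x%02llx (UC, IPAT set)\n",
               (unsigned long long)cd_memtype(0));
        return 0;
}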
@@ -962,7 +962,7 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon
 		kvm_mmu_reset_context(vcpu);
 
 	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
-	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
+	    kvm_mmu_honors_guest_mtrrs(vcpu->kvm) &&
 	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
 		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
 }
@@ -13313,15 +13313,30 @@ bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
 }
 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
 
+static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
+{
+	/*
+	 * Non-coherent DMA assignment and de-assignment will affect
+	 * whether KVM honors guest MTRRs and cause changes in memtypes
+	 * in TDP.
+	 * So, pass %true unconditionally to indicate non-coherent DMA was,
+	 * or will be involved, and that zapping SPTEs might be necessary.
+	 */
+	if (__kvm_mmu_honors_guest_mtrrs(true))
+		kvm_zap_gfn_range(kvm, gpa_to_gfn(0), gpa_to_gfn(~0ULL));
+}
+
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
 {
-	atomic_inc(&kvm->arch.noncoherent_dma_count);
+	if (atomic_inc_return(&kvm->arch.noncoherent_dma_count) == 1)
+		kvm_noncoherent_dma_assignment_start_or_stop(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
 
 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
 {
-	atomic_dec(&kvm->arch.noncoherent_dma_count);
+	if (!atomic_dec_return(&kvm->arch.noncoherent_dma_count))
+		kvm_noncoherent_dma_assignment_start_or_stop(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
...
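A usage note on the last hunk: the zap now runs only on the 0 -> 1 and 1 -> 0 transitions of noncoherent_dma_count, i.e. exactly when the answer to "does KVM honor guest MTRRs" can flip, and even then only if __kvm_mmu_honors_guest_mtrrs(true) says an EPT memtype mask is in play. A toy sketch of the transition logic follows; the plain counter and function names are stand-ins for the per-VM atomic and the kernel helpers.

#include <stdio.h>

static int noncoherent_dma_count;       /* stand-in for the per-VM atomic */

static void zap_possibly_stale_memtypes(void)
{
        puts("  -> zap SPTEs so memtypes are recomputed");
}

static void register_noncoherent_dma(void)
{
        if (++noncoherent_dma_count == 1)       /* first non-coherent device */
                zap_possibly_stale_memtypes();
}

static void unregister_noncoherent_dma(void)
{
        if (--noncoherent_dma_count == 0)       /* last non-coherent device gone */
                zap_possibly_stale_memtypes();
}

int main(void)
{
        puts("register #1");   register_noncoherent_dma();      /* 0 -> 1: zap */
        puts("register #2");   register_noncoherent_dma();      /* 1 -> 2: no zap */
        puts("unregister #1"); unregister_noncoherent_dma();    /* 2 -> 1: no zap */
        puts("unregister #2"); unregister_noncoherent_dma();    /* 1 -> 0: zap */
        return 0;
}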