Commit c624406f authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Everything related to the new quirks and memory type features:

   - small improvements to the quirks API

   - extending one of the quirks from just AMD to Intel as well, because
     4.2 can show the same problem with problematic firmware on Intel
     too"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: rename quirk constants to KVM_X86_QUIRK_*
  KVM: vmx: obey KVM_QUIRK_CD_NW_CLEARED
  KVM: x86: introduce kvm_check_has_quirk
  KVM: MTRR: simplify kvm_mtrr_get_guest_memory_type
  KVM: MTRR: fix memory type handling if MTRR is completely disabled
parents 45b4b782 0da029ed
...@@ -354,7 +354,7 @@ struct kvm_xcrs { ...@@ -354,7 +354,7 @@ struct kvm_xcrs {
struct kvm_sync_regs { struct kvm_sync_regs {
}; };
#define KVM_QUIRK_LINT0_REENABLED (1 << 0) #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_QUIRK_CD_NW_CLEARED (1 << 1) #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#endif /* _ASM_X86_KVM_H */ #endif /* _ASM_X86_KVM_H */
...@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) ...@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
for (i = 0; i < APIC_LVT_NUM; i++) for (i = 0; i < APIC_LVT_NUM; i++)
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
apic_update_lvtt(apic); apic_update_lvtt(apic);
if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED)) if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
apic_set_reg(apic, APIC_LVT0, apic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
......
...@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state) ...@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
} }
/*
 * Memory type to report when the guest has MTRRs disabled entirely.
 *
 * Per Intel SDM 11.11.2.2: clearing the IA32_MTRR_DEF_TYPE.E bit
 * disables all MTRRs, and the UC (uncacheable) memory type then
 * applies to all of physical memory.
 */
static u8 mtrr_disabled_type(void)
{
	return MTRR_TYPE_UNCACHABLE;
}
/* /*
* Three terms are used in the following code: * Three terms are used in the following code:
* - segment, it indicates the address segments covered by fixed MTRRs. * - segment, it indicates the address segments covered by fixed MTRRs.
...@@ -434,6 +444,8 @@ struct mtrr_iter { ...@@ -434,6 +444,8 @@ struct mtrr_iter {
/* output fields. */ /* output fields. */
int mem_type; int mem_type;
/* mtrr is completely disabled? */
bool mtrr_disabled;
/* [start, end) is not fully covered in MTRRs? */ /* [start, end) is not fully covered in MTRRs? */
bool partial_map; bool partial_map;
...@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter) ...@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
static void mtrr_lookup_start(struct mtrr_iter *iter) static void mtrr_lookup_start(struct mtrr_iter *iter)
{ {
if (!mtrr_is_enabled(iter->mtrr_state)) { if (!mtrr_is_enabled(iter->mtrr_state)) {
iter->partial_map = true; iter->mtrr_disabled = true;
return; return;
} }
...@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter, ...@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
iter->mtrr_state = mtrr_state; iter->mtrr_state = mtrr_state;
iter->start = start; iter->start = start;
iter->end = end; iter->end = end;
iter->mtrr_disabled = false;
iter->partial_map = false; iter->partial_map = false;
iter->fixed = false; iter->fixed = false;
iter->range = NULL; iter->range = NULL;
...@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) ...@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
return MTRR_TYPE_WRBACK; return MTRR_TYPE_WRBACK;
} }
/* It is not covered by MTRRs. */ if (iter.mtrr_disabled)
if (iter.partial_map) { return mtrr_disabled_type();
/*
* We just check one page, partially covered by MTRRs is /*
* impossible. * We just check one page, partially covered by MTRRs is
*/ * impossible.
WARN_ON(type != -1); */
type = mtrr_default_type(mtrr_state); WARN_ON(iter.partial_map);
}
/* not contained in any MTRRs. */
if (type == -1)
return mtrr_default_type(mtrr_state);
return type; return type;
} }
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
...@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, ...@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
return false; return false;
} }
if (iter.mtrr_disabled)
return true;
if (!iter.partial_map) if (!iter.partial_map)
return true; return true;
......
...@@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) ...@@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
* does not do it - this results in some delay at * does not do it - this results in some delay at
* reboot * reboot
*/ */
if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED)) if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cr0 &= ~(X86_CR0_CD | X86_CR0_NW); cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0; svm->vmcb->save.cr0 = cr0;
mark_dirty(svm->vmcb, VMCB_CR); mark_dirty(svm->vmcb, VMCB_CR);
......
...@@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) ...@@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
if (kvm_read_cr0(vcpu) & X86_CR0_CD) { if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
ipat = VMX_EPT_IPAT_BIT; ipat = VMX_EPT_IPAT_BIT;
cache = MTRR_TYPE_UNCACHABLE; if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cache = MTRR_TYPE_WRBACK;
else
cache = MTRR_TYPE_UNCACHABLE;
goto exit; goto exit;
} }
......
...@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu, ...@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
return kvm_register_write(vcpu, reg, val); return kvm_register_write(vcpu, reg, val);
} }
/*
 * Return true if @quirk is still active for this VM, i.e. it has not
 * been set in kvm->arch.disabled_quirks.
 */
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return (kvm->arch.disabled_quirks & quirk) == 0;
}
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu); void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment