Commit 26e122e9 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "All bugfixes except for a couple cleanup patches"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: VMX: Remove vcpu_vmx's defunct copy of host_pkru
  KVM: x86: allow TSC to differ by NTP correction bounds without TSC scaling
  KVM: X86: Fix MSR range of APIC registers in X2APIC mode
  KVM: VMX: Stop context switching MSR_IA32_UMWAIT_CONTROL
  KVM: nVMX: Plumb L2 GPA through to PML emulation
  KVM: x86/mmu: Avoid mixing gpa_t with gfn_t in walk_addr_generic()
  KVM: LAPIC: ensure APIC map is up to date on concurrent update requests
  kvm: lapic: fix broken vcpu hotplug
  Revert "KVM: VMX: Micro-optimize vmexit time when not exposing PMU"
  KVM: VMX: Add helpers to identify interrupt type from intr_info
  kvm/svm: disable KCSAN for svm_vcpu_run()
  KVM: MIPS: Fix a build error for !CPU_LOONGSON64
parents 3e08a952 e4553b49
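
Note on the "KVM: X86: Fix MSR range of APIC registers in X2APIC mode" change below: the xAPIC register block is a single 4 KiB page with one register every 16 bytes, so x2APIC exposes only 0x1000 / 16 = 0x100 MSRs starting at APIC_BASE_MSR (0x800); the old "+ 0x3ff" bound accepted MSR indices far beyond that block. A minimal sketch of the arithmetic (the helper names here are illustrative, not part of the patch):

#include <stdbool.h>

/* Illustrative only: why the x2APIC range ends at APIC_BASE_MSR + 0xff. */
#define APIC_BASE_MSR	0x800
#define APIC_PAGE_SIZE	0x1000	/* xAPIC MMIO register block */
#define APIC_REG_STRIDE	0x10	/* one register every 16 bytes */

static inline bool is_x2apic_msr(unsigned int msr)
{
	/* 0x1000 / 0x10 = 0x100 registers -> MSRs 0x800..0x8ff */
	return msr >= APIC_BASE_MSR &&
	       msr <= APIC_BASE_MSR + APIC_PAGE_SIZE / APIC_REG_STRIDE - 1;
}

static inline unsigned int x2apic_msr_to_reg_offset(unsigned int msr)
{
	/* MSR 0x800 -> offset 0x000, MSR 0x8ff -> offset 0xff0 */
	return (msr - APIC_BASE_MSR) * APIC_REG_STRIDE;
}
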
@@ -67,7 +67,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	VCPU_STAT("vz_ghfc", vz_ghfc_exits),
 	VCPU_STAT("vz_gpa", vz_gpa_exits),
 	VCPU_STAT("vz_resvd", vz_resvd_exits),
+#ifdef CONFIG_CPU_LOONGSON64
 	VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
+#endif
 #endif
 	VCPU_STAT("halt_successful_poll", halt_successful_poll),
 	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
...
@@ -943,7 +943,7 @@ struct kvm_arch {
 	atomic_t vapics_in_nmi_mode;
 	struct mutex apic_map_lock;
 	struct kvm_apic_map *apic_map;
-	bool apic_map_dirty;
+	atomic_t apic_map_dirty;
 	bool apic_access_page_done;
 	unsigned long apicv_inhibit_reasons;
@@ -1220,7 +1220,7 @@ struct kvm_x86_ops {
 	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
 					   struct kvm_memory_slot *slot,
 					   gfn_t offset, unsigned long mask);
-	int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+	int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

 	/* pmu operations of sub-arch */
 	const struct kvm_pmu_ops *pmu_ops;
...
@@ -25,8 +25,6 @@
 #define TPAUSE_C01_STATE		1
 #define TPAUSE_C02_STATE		0

-u32 get_umwait_control_msr(void);
-
 static inline void __monitor(const void *eax, unsigned long ecx,
 			     unsigned long edx)
 {
...
@@ -18,12 +18,6 @@
  */
 static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);

-u32 get_umwait_control_msr(void)
-{
-	return umwait_control_cached;
-}
-EXPORT_SYMBOL_GPL(get_umwait_control_msr);
-
 /*
  * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
  * hardware or BIOS before kernel boot.
...
@@ -169,6 +169,18 @@ static void kvm_apic_map_free(struct rcu_head *rcu)
 	kvfree(map);
 }

+/*
+ * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
+ *
+ * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
+ * apic_map_lock_held.
+ */
+enum {
+	CLEAN,
+	UPDATE_IN_PROGRESS,
+	DIRTY
+};
+
 void kvm_recalculate_apic_map(struct kvm *kvm)
 {
 	struct kvm_apic_map *new, *old = NULL;
@@ -176,17 +188,17 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
 	int i;
 	u32 max_id = 255; /* enough space for any xAPIC ID */

-	if (!kvm->arch.apic_map_dirty) {
-		/*
-		 * Read kvm->arch.apic_map_dirty before
-		 * kvm->arch.apic_map
-		 */
-		smp_rmb();
+	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
+	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
 		return;
-	}

 	mutex_lock(&kvm->arch.apic_map_lock);
-	if (!kvm->arch.apic_map_dirty) {
+	/*
+	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
+	 * (if clean) or the APIC registers (if dirty).
+	 */
+	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
+				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
 		/* Someone else has updated the map. */
 		mutex_unlock(&kvm->arch.apic_map_lock);
 		return;
@@ -256,11 +268,11 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
 			lockdep_is_held(&kvm->arch.apic_map_lock));
 	rcu_assign_pointer(kvm->arch.apic_map, new);
 	/*
-	 * Write kvm->arch.apic_map before
-	 * clearing apic->apic_map_dirty
+	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
+	 * If another update has come in, leave it DIRTY.
 	 */
-	smp_wmb();
-	kvm->arch.apic_map_dirty = false;
+	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
+			       UPDATE_IN_PROGRESS, CLEAN);
 	mutex_unlock(&kvm->arch.apic_map_lock);

 	if (old)
@@ -282,20 +294,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
 		else
 			static_key_slow_inc(&apic_sw_disabled.key);

-		apic->vcpu->kvm->arch.apic_map_dirty = true;
+		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 	}
 }

 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
 {
 	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
-	apic->vcpu->kvm->arch.apic_map_dirty = true;
+	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }

 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 {
 	kvm_lapic_set_reg(apic, APIC_LDR, id);
-	apic->vcpu->kvm->arch.apic_map_dirty = true;
+	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }

 static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
@@ -311,7 +323,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 	kvm_lapic_set_reg(apic, APIC_ID, id);
 	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
-	apic->vcpu->kvm->arch.apic_map_dirty = true;
+	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }

 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
@@ -1976,7 +1988,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 	case APIC_DFR:
 		if (!apic_x2apic_mode(apic)) {
 			kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
-			apic->vcpu->kvm->arch.apic_map_dirty = true;
+			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 		} else
 			ret = 1;
 		break;
@@ -2232,7 +2244,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 			static_key_slow_dec_deferred(&apic_hw_disabled);
 		} else {
 			static_key_slow_inc(&apic_hw_disabled.key);
-			vcpu->kvm->arch.apic_map_dirty = true;
+			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 		}
 	}
@@ -2273,7 +2285,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	if (!apic)
 		return;

-	vcpu->kvm->arch.apic_map_dirty = false;
 	/* Stop the timer in case it's a reset to an active apic */
 	hrtimer_cancel(&apic->lapic_timer.timer);
@@ -2567,6 +2578,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	}
 	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

+	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 	kvm_recalculate_apic_map(vcpu->kvm);
 	kvm_apic_set_version(vcpu);
...
@@ -222,7 +222,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 				    struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

 int kvm_mmu_post_init_vm(struct kvm *kvm);
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
...
@@ -1745,10 +1745,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
  * Emulate arch specific page modification logging for the
  * nested hypervisor
  */
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
 {
 	if (kvm_x86_ops.write_log_dirty)
-		return kvm_x86_ops.write_log_dirty(vcpu);
+		return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa);

 	return 0;
 }
...
@@ -235,7 +235,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 					     struct kvm_mmu *mmu,
 					     struct guest_walker *walker,
-					     int write_fault)
+					     gpa_t addr, int write_fault)
 {
 	unsigned level, index;
 	pt_element_t pte, orig_pte;
@@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 		    !(pte & PT_GUEST_DIRTY_MASK)) {
 			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
 #if PTTYPE == PTTYPE_EPT
-			if (kvm_arch_write_log_dirty(vcpu))
+			if (kvm_arch_write_log_dirty(vcpu, addr))
 				return -EINVAL;
 #endif
 			pte |= PT_GUEST_DIRTY_MASK;
@@ -360,7 +360,6 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	++walker->level;

 	do {
-		gfn_t real_gfn;
 		unsigned long host_addr;

 		pt_access = pte_access;
@@ -375,7 +374,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		walker->table_gfn[walker->level - 1] = table_gfn;
 		walker->pte_gpa[walker->level - 1] = pte_gpa;

-		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
+		real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      nested_access,
					      &walker->fault);
@@ -389,12 +388,10 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
		 * information to fix the exit_qualification or exit_info_1
		 * fields.
		 */
-		if (unlikely(real_gfn == UNMAPPED_GVA))
+		if (unlikely(real_gpa == UNMAPPED_GVA))
			return 0;

-		real_gfn = gpa_to_gfn(real_gfn);
-
-		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
+		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
					    &walker->pte_writable[walker->level - 1]);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;
@@ -457,7 +454,8 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
		(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

	if (unlikely(!accessed_dirty)) {
-		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
+		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
+							addr, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
...
@@ -3344,7 +3344,7 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

-static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	fastpath_t exit_fastpath;
 	struct vcpu_svm *svm = to_svm(vcpu);
...
@@ -72,11 +72,24 @@ struct loaded_vmcs {
 	struct vmcs_controls_shadow controls_shadow;
 };

+static inline bool is_intr_type(u32 intr_info, u32 type)
+{
+	const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK;
+
+	return (intr_info & mask) == (INTR_INFO_VALID_MASK | type);
+}
+
+static inline bool is_intr_type_n(u32 intr_info, u32 type, u8 vector)
+{
+	const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK |
+			 INTR_INFO_VECTOR_MASK;
+
+	return (intr_info & mask) == (INTR_INFO_VALID_MASK | type | vector);
+}
+
 static inline bool is_exception_n(u32 intr_info, u8 vector)
 {
-	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-			     INTR_INFO_VALID_MASK)) ==
-		(INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
+	return is_intr_type_n(intr_info, INTR_TYPE_HARD_EXCEPTION, vector);
 }

 static inline bool is_debug(u32 intr_info)
@@ -106,28 +119,23 @@ static inline bool is_gp_fault(u32 intr_info)
 static inline bool is_machine_check(u32 intr_info)
 {
-	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-			     INTR_INFO_VALID_MASK)) ==
-		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+	return is_exception_n(intr_info, MC_VECTOR);
 }

 /* Undocumented: icebp/int1 */
 static inline bool is_icebp(u32 intr_info)
 {
-	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-		== (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+	return is_intr_type(intr_info, INTR_TYPE_PRIV_SW_EXCEPTION);
 }

 static inline bool is_nmi(u32 intr_info)
 {
-	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-		== (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
+	return is_intr_type(intr_info, INTR_TYPE_NMI_INTR);
 }

 static inline bool is_external_intr(u32 intr_info)
 {
-	return (intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
-		== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR);
+	return is_intr_type(intr_info, INTR_TYPE_EXT_INTR);
 }

 enum vmcs_field_width {
...
@@ -6606,23 +6606,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host, false);
 }

-static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
-{
-	u32 host_umwait_control;
-
-	if (!vmx_has_waitpkg(vmx))
-		return;
-
-	host_umwait_control = get_umwait_control_msr();
-
-	if (vmx->msr_ia32_umwait_control != host_umwait_control)
-		add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
-			vmx->msr_ia32_umwait_control,
-			host_umwait_control, false);
-	else
-		clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
-}
-
 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6728,9 +6711,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	pt_guest_enter(vmx);

-	if (vcpu_to_pmu(vcpu)->version)
-		atomic_switch_perf_msrs(vmx);
-	atomic_switch_umwait_control_msr(vmx);
+	atomic_switch_perf_msrs(vmx);

 	if (enable_preemption_timer)
 		vmx_update_hv_timer(vcpu);
@@ -7501,11 +7482,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
 	kvm_flush_pml_buffers(kvm);
 }

-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
 	struct vmcs12 *vmcs12;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	gpa_t gpa, dst;
+	gpa_t dst;

 	if (is_guest_mode(vcpu)) {
 		WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7524,7 +7505,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
 		return 1;
 	}

-	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+	gpa &= ~0xFFFull;
 	dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;

 	if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
...
@@ -288,8 +288,6 @@ struct vcpu_vmx {
 	u64 current_tsc_ratio;

-	u32 host_pkru;
-
 	unsigned long host_debugctlmsr;

 	/*
...
@@ -2856,7 +2856,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		return kvm_mtrr_set_msr(vcpu, msr, data);
 	case MSR_IA32_APICBASE:
 		return kvm_set_apic_base(vcpu, msr_info);
-	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
 		return kvm_x2apic_msr_write(vcpu, msr, data);
 	case MSR_IA32_TSCDEADLINE:
 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
@@ -3196,7 +3196,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_APICBASE:
 		msr_info->data = kvm_get_apic_base(vcpu);
 		break;
-	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
 		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
 	case MSR_IA32_TSCDEADLINE:
 		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
@@ -4603,7 +4603,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		r = -EINVAL;
 		user_tsc_khz = (u32)arg;

-		if (user_tsc_khz >= kvm_max_guest_tsc_khz)
+		if (kvm_has_tsc_control &&
+		    user_tsc_khz >= kvm_max_guest_tsc_khz)
 			goto out;

 		if (user_tsc_khz == 0)
...