Commit 6e3ba4ab authored by Tao Xu, committed by Paolo Bonzini

KVM: vmx: Emulate MSR IA32_UMWAIT_CONTROL

The UMWAIT and TPAUSE instructions use the 32-bit IA32_UMWAIT_CONTROL MSR at
index E1H to determine the maximum time in TSC-quanta that the processor can
reside in either C0.1 or C0.2.
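For reference, the control's layout, as defined for the host side in
arch/x86/include/asm/mwait.h and reproduced here purely as an aid to reading
the patch:

/*
 * IA32_UMWAIT_CONTROL (MSR 0xE1) layout:
 *   bit  0    - 1 disables C0.2 (UMWAIT_C02_ENABLE expands to 0)
 *   bit  1    - reserved, must be zero
 *   bits 31:2 - maximum wait time in TSC-quanta (low two bits read as zero)
 */
#define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE	BIT(0)
#define MSR_IA32_UMWAIT_CONTROL_TIME_MASK	(~0x03U)

#define UMWAIT_CTRL_VAL(max_time, c02_disable)				\
	(((max_time) & MSR_IA32_UMWAIT_CONTROL_TIME_MASK) |		\
	 ((c02_disable) & MSR_IA32_UMWAIT_CONTROL_C02_DISABLE))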

This patch emulates MSR IA32_UMWAIT_CONTROL in the guest and differentiates
IA32_UMWAIT_CONTROL between host and guest. The variable
umwait_control_cached in arch/x86/kernel/cpu/umwait.c caches the MSR value,
so this patch uses it to avoid frequent rdmsr calls on IA32_UMWAIT_CONTROL.
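For context, this is roughly how guest code consumes the budget programmed in
this MSR. A hedged sketch, not part of the patch: tpause_until() is a made-up
helper name, and building it requires a WAITPKG-capable toolchain
(GCC/Clang with -mwaitpkg):

#include <immintrin.h>
#include <stdint.h>

/*
 * Illustrative guest-side use of TPAUSE: the processor resumes at the
 * requested TSC deadline, or earlier once the wait exceeds the maximum
 * programmed in IA32_UMWAIT_CONTROL.
 */
static inline void tpause_until(uint64_t tsc_deadline)
{
	_tpause(0, tsc_deadline);	/* 0 requests the deeper C0.2 state */
}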
Co-developed-by: Jingqi Liu <jingqi.liu@intel.com>
Signed-off-by: Jingqi Liu <jingqi.liu@intel.com>
Signed-off-by: Tao Xu <tao3.xu@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e69e72fa
@@ -17,6 +17,12 @@
  */
 static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
 
+u32 get_umwait_control_msr(void)
+{
+	return umwait_control_cached;
+}
+EXPORT_SYMBOL_GPL(get_umwait_control_msr);
+
 /*
  * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
  * hardware or BIOS before kernel boot.
@@ -1733,6 +1733,12 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 #endif
 	case MSR_EFER:
 		return kvm_get_msr_common(vcpu, msr_info);
+	case MSR_IA32_UMWAIT_CONTROL:
+		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
+			return 1;
+
+		msr_info->data = vmx->msr_ia32_umwait_control;
+		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
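Note the host_initiated carve-out above: user space can always reach the
emulated MSR through the regular KVM MSR ioctls, even when the guest was not
given WAITPKG. A minimal sketch of such a read; read_umwait_control() and the
vcpu_fd plumbing are assumptions, but KVM_GET_MSRS is the standard interface:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_IA32_UMWAIT_CONTROL 0xe1

/* Read the emulated MSR from a vCPU fd (a host-initiated access). */
static uint64_t read_umwait_control(int vcpu_fd)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} req = {
		.hdr.nmsrs   = 1,
		.entry.index = MSR_IA32_UMWAIT_CONTROL,
	};

	/* KVM_GET_MSRS returns the number of MSRs successfully read. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &req) != 1)
		perror("KVM_GET_MSRS");
	return req.entry.data;
}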
@@ -1906,6 +1912,16 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
 		break;
+	case MSR_IA32_UMWAIT_CONTROL:
+		if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
+			return 1;
+
+		/* Reserved bit 1 and bits 63:32 must be zero */
+		if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32)))
+			return 1;
+
+		vmx->msr_ia32_umwait_control = data;
+		break;
 	case MSR_IA32_SPEC_CTRL:
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
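The effect of the validity check, seen from inside the guest (an illustrative
sketch only; a write rejected by vmx_set_msr() surfaces in the guest as #GP):

/* Example guest wrmsr outcomes under the check above: */
static void umwait_control_examples(void)
{
	wrmsrl(MSR_IA32_UMWAIT_CONTROL,
	       UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE)); /* accepted */
	wrmsrl(MSR_IA32_UMWAIT_CONTROL, BIT_ULL(1));  /* #GP: reserved bit 1 */
	wrmsrl(MSR_IA32_UMWAIT_CONTROL, BIT_ULL(32)); /* #GP: bits 63:32 set */
}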
@@ -4211,6 +4227,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vmx->rmode.vm86_active = 0;
 	vmx->spec_ctrl = 0;
 
+	vmx->msr_ia32_umwait_control = 0;
+
 	vcpu->arch.microcode_version = 0x100000000ULL;
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
 	vmx->hv_deadline_tsc = -1;
@@ -6384,6 +6402,23 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host, false);
 }
 
+static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
+{
+	u32 host_umwait_control;
+
+	if (!vmx_has_waitpkg(vmx))
+		return;
+
+	host_umwait_control = get_umwait_control_msr();
+
+	if (vmx->msr_ia32_umwait_control != host_umwait_control)
+		add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
+			vmx->msr_ia32_umwait_control,
+			host_umwait_control, false);
+	else
+		clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
+}
+
 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
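add_atomic_switch_msr() and clear_atomic_switch_msr() manage the VMCS
VM-entry/VM-exit MSR-load lists, so the hardware itself swaps the guest and
host values at each transition; when the two values match, the list entry is
dropped and the transitions stay cheaper. The programmed lists behave roughly
like this (a reading model with made-up on_vm_entry()/on_vm_exit() names, not
kernel code):

/*
 * Conceptual effect of the MSR-load lists set up above: the CPU performs
 * the equivalent of these writes as part of the VM transition itself,
 * with no extra vmexits when the guest accesses the MSR.
 */
static void on_vm_entry(struct vcpu_vmx *vmx)
{
	wrmsrl(MSR_IA32_UMWAIT_CONTROL, vmx->msr_ia32_umwait_control);
}

static void on_vm_exit(void)
{
	wrmsrl(MSR_IA32_UMWAIT_CONTROL, get_umwait_control_msr());
}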
@@ -6478,6 +6513,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	pt_guest_enter(vmx);
 
 	atomic_switch_perf_msrs(vmx);
+	atomic_switch_umwait_control_msr(vmx);
 
 	if (enable_preemption_timer)
 		vmx_update_hv_timer(vcpu);
@@ -14,6 +14,8 @@
 extern const u32 vmx_msr_index[];
 extern u64 host_efer;
 
+extern u32 get_umwait_control_msr(void);
+
 #define MSR_TYPE_R	1
 #define MSR_TYPE_W	2
 #define MSR_TYPE_RW	3
@@ -211,6 +213,7 @@ struct vcpu_vmx {
 #endif
 
 	u64 spec_ctrl;
+	u32 msr_ia32_umwait_control;
 
 	u32 secondary_exec_control;
@@ -497,6 +500,12 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
 	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
 }
 
+static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
+{
+	return vmx->secondary_exec_control &
+		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
+}
+
 void dump_vmcs(void);
 
 #endif /* __KVM_X86_VMX_H */
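vmx_has_waitpkg() keys off the secondary execution control rather than guest
CPUID: the control is expected to be set only when WAITPKG has been exposed
to the guest, and it is what lets the guest execute UMONITOR/UMWAIT/TPAUSE
without exiting. For reference, its value as defined in
arch/x86/include/asm/vmx.h:

/* Secondary processor-based VM-execution control, bit 26
 * ("enable user wait and pause"): */
#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE	0x04000000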
@@ -1145,6 +1145,8 @@ static u32 msrs_to_save[] = {
 	MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
 	MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
 	MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
+	MSR_IA32_UMWAIT_CONTROL,
+
 	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
 	MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
 	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,