Commit b07a5c53 authored by Paolo Bonzini

KVM: vmx: use MSR_IA32_TSX_CTRL to hard-disable TSX on guests that lack it

If X86_FEATURE_RTM is disabled, the guest should not be able to access
MSR_IA32_TSX_CTRL.  We can therefore use it in KVM to force all
transactions from the guest to abort.
Tested-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c11f83e0
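For context on the guest-visible effect of this change: once the host writes TSX_CTRL_RTM_DISABLE on the guest's behalf, XBEGIN still executes in the guest, but every RTM transaction aborts immediately, so guest code always falls into its fallback path. The snippet below is a minimal guest-side sketch of that behavior, not part of this commit; the file name is illustrative and it assumes a toolchain providing the RTM intrinsics (built with gcc -mrtm).

/*
 * tsx_probe.c - illustration only, not part of this commit.
 * With TSX_CTRL_RTM_DISABLE in effect, _xbegin() never returns
 * _XBEGIN_STARTED because every transaction is forced to abort.
 */
#include <immintrin.h>
#include <stdio.h>

int main(void)
{
	unsigned int status = _xbegin();

	if (status == _XBEGIN_STARTED) {
		/* Only reached when RTM transactions can actually commit. */
		_xend();
		printf("RTM transaction committed\n");
	} else {
		/* Expected on a guest whose CPUID lacks RTM: forced abort. */
		printf("RTM transaction aborted, status=0x%x\n", status);
	}
	return 0;
}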
@@ -639,6 +639,23 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 	return NULL;
 }
 
+static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct shared_msr_entry *msr, u64 data)
+{
+	int ret = 0;
+	u64 old_msr_data = msr->data;
+
+	msr->data = data;
+	if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+		preempt_disable();
+		ret = kvm_set_shared_msr(msr->index, msr->data,
+					 msr->mask);
+		preempt_enable();
+		if (ret)
+			msr->data = old_msr_data;
+	}
+	return ret;
+}
+
 void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
 {
 	vmcs_clear(loaded_vmcs->vmcs);
@@ -2174,19 +2191,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	default:
 	find_shared_msr:
 		msr = find_msr_entry(vmx, msr_index);
-		if (msr) {
-			u64 old_msr_data = msr->data;
-			msr->data = data;
-			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
-				preempt_disable();
-				ret = kvm_set_shared_msr(msr->index, msr->data,
-							 msr->mask);
-				preempt_enable();
-				if (ret)
-					msr->data = old_msr_data;
-			}
-			break;
-		}
-		ret = kvm_set_msr_common(vcpu, msr_info);
+		if (msr)
+			ret = vmx_set_guest_msr(vmx, msr, data);
+		else
+			ret = kvm_set_msr_common(vcpu, msr_info);
 	}
@@ -7142,6 +7149,15 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 	if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
 			guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
 		update_intel_pt_cfg(vcpu);
+
+	if (boot_cpu_has(X86_FEATURE_RTM)) {
+		struct shared_msr_entry *msr;
+		msr = find_msr_entry(vmx, MSR_IA32_TSX_CTRL);
+		if (msr) {
+			bool enabled = guest_cpuid_has(vcpu, X86_FEATURE_RTM);
+			vmx_set_guest_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
+		}
+	}
 }
 
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)