Commit fb61464d authored by Sean Christopherson, committed by Stefan Bader

KVM: x86: Emulate MSR_IA32_ARCH_CAPABILITIES on AMD hosts

The CPUID flag ARCH_CAPABILITIES is unconditionally exposed to host
userspace for all x86 hosts, i.e. KVM advertises ARCH_CAPABILITIES
regardless of hardware support under the pretense that KVM fully
emulates MSR_IA32_ARCH_CAPABILITIES.  Unfortunately, only VMX hosts
handle accesses to MSR_IA32_ARCH_CAPABILITIES (despite KVM_GET_MSRS
also reporting MSR_IA32_ARCH_CAPABILITIES for all hosts).

Move the MSR_IA32_ARCH_CAPABILITIES handling to common x86 code so
that it's emulated on AMD hosts.
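
For illustration only (not part of the patch): a minimal userspace sketch of
how a VMM reads this MSR through the KVM_GET_MSRS vcpu ioctl. After this
change the access is serviced by kvm_get_msr_common() on both Intel and AMD
hosts. The vcpu file descriptor is assumed to come from the usual
KVM_CREATE_VM/KVM_CREATE_VCPU sequence, and error handling is omitted.

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a

  static __u64 read_arch_capabilities(int vcpu_fd)
  {
  	/* kvm_msrs header followed by a single in-line entry. */
  	struct {
  		struct kvm_msrs hdr;
  		struct kvm_msr_entry entry;
  	} req;

  	memset(&req, 0, sizeof(req));
  	req.hdr.nmsrs = 1;
  	req.entry.index = MSR_IA32_ARCH_CAPABILITIES;

  	/* KVM_GET_MSRS returns the number of MSRs successfully read. */
  	if (ioctl(vcpu_fd, KVM_GET_MSRS, &req) != 1)
  		return 0;

  	return req.entry.data;
  }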

Fixes: 1eaafe91 ("kvm: x86: IA32_ARCH_CAPABILITIES is always supported")
Cc: stable@vger.kernel.org
Reported-by: Xiaoyao Li <xiaoyao.li@linux.intel.com>
Cc: Jim Mattson <jmattson@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

CVE-2019-11135

(backported from commit 0cf9135b)
[tyhicks: Backport to 4.4
 - vmx.c and vmx.h are up one directory level
 - Minor context adjustments in x86.c due to different surrounding MSR
   case statements and stack variable differences in
   kvm_arch_vcpu_setup()
 - Call guest_cpuid_has_arch_capabilities() instead of the non-existent
   guest_cpuid_has()]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent 65bee31e
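
The backport note above calls guest_cpuid_has_arch_capabilities() instead of
the newer generic guest_cpuid_has(). As a rough, hypothetical sketch of what
such a 4.4-style helper in arch/x86/kvm/cpuid.h looks like (the exact in-tree
definition may differ), it simply tests the ARCH_CAPABILITIES bit in the
guest's CPUID entries:

  /*
   * Hypothetical sketch of the 4.4-style helper named in the backport
   * note; the actual in-tree definition may differ.
   * CPUID.(EAX=7,ECX=0):EDX[29] advertises IA32_ARCH_CAPABILITIES.
   */
  static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
  {
  	struct kvm_cpuid_entry2 *best;

  	best = kvm_find_cpuid_entry(vcpu, 7, 0);
  	return best && (best->edx & (1u << 29));
  }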
@@ -440,6 +440,7 @@ struct kvm_vcpu_arch {
 	bool tpr_access_reporting;
 	u64 ia32_xss;
 	u64 microcode_version;
+	u64 arch_capabilities;
 
 	/*
 	 * Paging state of the vcpu
@@ -702,7 +702,6 @@ struct vcpu_vmx {
 	u64 msr_guest_kernel_gs_base;
 #endif
 
-	u64 arch_capabilities;
 	u64 spec_ctrl;
 
 	u32 vm_entry_controls_shadow;
@@ -3047,12 +3046,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
 		break;
-	case MSR_IA32_ARCH_CAPABILITIES:
-		if (!msr_info->host_initiated &&
-		    !guest_cpuid_has_arch_capabilities(vcpu))
-			return 1;
-		msr_info->data = to_vmx(vcpu)->arch_capabilities;
-		break;
 	case MSR_IA32_SYSENTER_CS:
 		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
 		break;
@@ -3209,11 +3202,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
 					      MSR_TYPE_W);
 		break;
-	case MSR_IA32_ARCH_CAPABILITIES:
-		if (!msr_info->host_initiated)
-			return 1;
-		vmx->arch_capabilities = data;
-		break;
 	case MSR_IA32_CR_PAT:
 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
 			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -5277,8 +5265,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		++vmx->nmsrs;
 	}
 
-	vmx->arch_capabilities = kvm_get_arch_capabilities();
-
 	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
 
 	/* 22.2.1, 20.8.1 */
@@ -2135,6 +2135,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (msr_info->host_initiated)
 			vcpu->arch.microcode_version = data;
 		break;
+	case MSR_IA32_ARCH_CAPABILITIES:
+		if (!msr_info->host_initiated)
+			return 1;
+		vcpu->arch.arch_capabilities = data;
+		break;
 	case MSR_EFER:
 		return set_efer(vcpu, msr_info);
 	case MSR_K7_HWCR:
@@ -2410,6 +2415,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = vcpu->arch.microcode_version;
 		break;
+	case MSR_IA32_ARCH_CAPABILITIES:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has_arch_capabilities(vcpu))
+			return 1;
+		msr_info->data = vcpu->arch.arch_capabilities;
+		break;
 	case MSR_MTRRcap:
 	case 0x200 ... 0x2ff:
 		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -7556,6 +7567,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	int r;
 
+	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
 	kvm_vcpu_mtrr_init(vcpu);
 	r = vcpu_load(vcpu);
 	if (r)