Commit 5719455f authored by Tom Lendacky, committed by Paolo Bonzini

KVM: SVM: Do not report support for SMM for an SEV-ES guest

SEV-ES guests do not currently support SMM. Update the has_emulated_msr()
kvm_x86_ops function to take a struct kvm parameter so that the capability
can be reported at a VM level.

Since this op is also called during KVM initialization, before a struct
kvm instance is available, comments are added to each implementation of
has_emulated_msr() to indicate that the kvm parameter can be NULL.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <75de5138e33b945d2fb17f81ae507bda381808e3.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5265713a
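
For context (not part of the patch below), here is a minimal standalone sketch of the calling convention this change establishes. The struct kvm layout, the sev_es_guest() stand-in, and the has_emulated_msr() body here are simplified illustrations, not the kernel definitions: the op receives the VM when a capability is queried for a specific VM, and NULL when KVM enumerates emulated MSRs at module initialization, so implementations must tolerate kvm == NULL.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int u32;

#define MSR_IA32_SMBASE 0x0000009e      /* SMBASE MSR number */

/* Hypothetical, simplified VM handle for this sketch only. */
struct kvm {
        bool sev_es_active;
};

/* Stand-in for the kernel's sev_es_guest() helper. */
static bool sev_es_guest(struct kvm *kvm)
{
        return kvm->sev_es_active;
}

/* Same shape and NULL-check pattern as the patched svm_has_emulated_msr(). */
static bool has_emulated_msr(struct kvm *kvm, u32 index)
{
        switch (index) {
        case MSR_IA32_SMBASE:
                /* Per-VM answer: SMM is not supported for SEV-ES guests. */
                if (kvm && sev_es_guest(kvm))
                        return false;
                break;
        default:
                break;
        }
        return true;
}

int main(void)
{
        struct kvm sev_es_vm = { .sev_es_active = true };

        /* Module-init style call (MSR list setup): no VM yet, pass NULL. */
        printf("init-time SMBASE emulated: %d\n",
               has_emulated_msr(NULL, MSR_IA32_SMBASE));

        /* Per-VM capability query: pass the VM being checked. */
        printf("SEV-ES VM SMBASE emulated: %d\n",
               has_emulated_msr(&sev_es_vm, MSR_IA32_SMBASE));
        return 0;
}

Compiled as plain userspace C, the NULL call reports the MSR as emulated (there is no VM to restrict it), while the per-VM call for the SEV-ES VM reports it as unsupported, mirroring the svm_has_emulated_msr() change in the diff below.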
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1092,7 +1092,7 @@ struct kvm_x86_ops {
         void (*hardware_disable)(void);
         void (*hardware_unsetup)(void);
         bool (*cpu_has_accelerated_tpr)(void);
-        bool (*has_emulated_msr)(u32 index);
+        bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
         void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
 
         unsigned int vm_size;
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3878,12 +3878,21 @@ static bool svm_cpu_has_accelerated_tpr(void)
         return false;
 }
 
-static bool svm_has_emulated_msr(u32 index)
+/*
+ * The kvm parameter can be NULL (module initialization, or invocation before
+ * VM creation). Be sure to check the kvm parameter before using it.
+ */
+static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
 {
         switch (index) {
         case MSR_IA32_MCG_EXT_CTL:
         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
                 return false;
+        case MSR_IA32_SMBASE:
+                /* SEV-ES guests do not support SMM, so report false */
+                if (kvm && sev_es_guest(kvm))
+                        return false;
+                break;
         default:
                 break;
         }
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6379,7 +6379,11 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
         handle_exception_nmi_irqoff(vmx);
 }
 
-static bool vmx_has_emulated_msr(u32 index)
+/*
+ * The kvm parameter can be NULL (module initialization, or invocation before
+ * VM creation). Be sure to check the kvm parameter before using it.
+ */
+static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
 {
         switch (index) {
         case MSR_IA32_SMBASE:
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3783,7 +3783,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                  * fringe case that is not enabled except via specific settings
                  * of the module parameters.
                  */
-                r = kvm_x86_ops.has_emulated_msr(MSR_IA32_SMBASE);
+                r = kvm_x86_ops.has_emulated_msr(kvm, MSR_IA32_SMBASE);
                 break;
         case KVM_CAP_VAPIC:
                 r = !kvm_x86_ops.cpu_has_accelerated_tpr();
@@ -5782,7 +5782,7 @@ static void kvm_init_msr_list(void)
         }
 
         for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
-                if (!kvm_x86_ops.has_emulated_msr(emulated_msrs_all[i]))
+                if (!kvm_x86_ops.has_emulated_msr(NULL, emulated_msrs_all[i]))
                         continue;
 
                 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];