Commit 6d396b55 authored by Paolo Bonzini

KVM: x86: advertise KVM_CAP_X86_SMM

... and we're done. :)

Because SMBASE is usually relocated above 1M on modern chipsets, and
SMM handlers might indeed rely on 4G segment limits, we only expose it
if KVM is able to run the guest in big real mode.  This includes any
of VMX+emulate_invalid_guest_state, VMX+unrestricted_guest, or SVM.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 699023e2
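
For context, userspace discovers the new capability through the standard KVM_CHECK_EXTENSION ioctl on the /dev/kvm fd before relying on the SMM state ioctls. A minimal sketch (the fd handling and the printf are illustrative, not part of this commit):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);    /* system fd, not a VM fd */
        if (kvm < 0)
            return 1;

        /* Returns 1 when KVM advertises SMM support, 0 when real mode
         * would have to be emulated via vm86 and the cap stays hidden. */
        int smm = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM);
        printf("KVM_CAP_X86_SMM: %d\n", smm);
        return 0;
    }
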
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -709,6 +709,7 @@ struct kvm_x86_ops {
 	int (*hardware_setup)(void);               /* __init */
 	void (*hardware_unsetup)(void);            /* __exit */
 	bool (*cpu_has_accelerated_tpr)(void);
+	bool (*cpu_has_high_real_mode_segbase)(void);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
 	/* Create, but do not attach this VCPU */
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4080,6 +4080,11 @@ static bool svm_cpu_has_accelerated_tpr(void)
 	return false;
 }
 
+static bool svm_has_high_real_mode_segbase(void)
+{
+	return true;
+}
+
 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
 	return 0;
@@ -4353,6 +4358,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
 
 	.vcpu_create = svm_create_vcpu,
 	.vcpu_free = svm_free_vcpu,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8139,6 +8139,11 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 	local_irq_enable();
 }
 
+static bool vmx_has_high_real_mode_segbase(void)
+{
+	return enable_unrestricted_guest || emulate_invalid_guest_state;
+}
+
 static bool vmx_mpx_supported(void)
 {
 	return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
@@ -10296,6 +10301,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.hardware_enable = hardware_enable,
 	.hardware_disable = hardware_disable,
 	.cpu_has_accelerated_tpr = report_flexpriority,
+	.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
 
 	.vcpu_create = vmx_create_vcpu,
 	.vcpu_free = vmx_free_vcpu,
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2900,6 +2900,17 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #endif
 		r = 1;
 		break;
+	case KVM_CAP_X86_SMM:
+		/* SMBASE is usually relocated above 1M on modern chipsets,
+		 * and SMM handlers might indeed rely on 4G segment limits,
+		 * so do not report SMM to be available if real mode is
+		 * emulated via vm86 mode.  Still, do not go to great lengths
+		 * to avoid userspace's usage of the feature, because it is a
+		 * fringe case that is not enabled except via specific settings
+		 * of the module parameters.
+		 */
+		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
@@ -4299,6 +4310,10 @@ static void kvm_init_msr_list(void)
 
 	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
 		switch (emulated_msrs[i]) {
+		case MSR_IA32_SMBASE:
+			if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
+				continue;
+			break;
 		default:
 			break;
 		}
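
The kvm_init_msr_list() hunk has a user-visible effect as well: when the capability is absent, MSR_IA32_SMBASE is filtered out of the emulated-MSR list, so it never appears in what KVM_GET_MSR_INDEX_LIST reports. A rough sketch of how userspace would observe this, assuming the fixed buffer size of 512 entries is large enough and with only minimal error handling:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    #define MSR_IA32_SMBASE 0x0000009e    /* value from asm/msr-index.h */

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        unsigned int n = 512;             /* assumed large enough here */
        struct kvm_msr_list *list = malloc(sizeof(*list) + n * sizeof(__u32));

        if (kvm < 0 || !list)
            return 1;

        list->nmsrs = n;
        if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) < 0) {
            perror("KVM_GET_MSR_INDEX_LIST");
            return 1;
        }

        /* SMBASE shows up only when cpu_has_high_real_mode_segbase() is true */
        for (unsigned int i = 0; i < list->nmsrs; i++)
            if (list->indices[i] == MSR_IA32_SMBASE)
                printf("MSR_IA32_SMBASE is advertised\n");
        return 0;
    }
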