Commit f9b245e1 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: nVMX: Remove param indirection from nested_vmx_check_msr_switch()

Passing the enum and doing an indirect lookup is silly when we can
simply pass the field's value directly.  Remove the "fast path" code in
nested_vmx_check_msr_switch_controls() as it's now nothing more than a
redundant check.
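Condensed from the diff below, the calling convention changes from
passing VMCS field encodings for the callee to look up, to passing the
vmcs12 values themselves:

        /* Before: pass field encodings, let the callee look the values up. */
        nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
                                    VM_EXIT_MSR_LOAD_ADDR);

        /* After: read vmcs12 in the caller and pass the values directly. */
        nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
                                    vmcs12->vm_exit_msr_load_addr);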

Remove the debug message rather than continue passing the enum for the
address field.  Having debug messages for the MSRs themselves is useful
as MSR legality is a huge space, whereas messing up a physical address
means the VMM is fundamentally broken.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 461b4ba4
@@ -700,45 +700,31 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
 }
 
 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
-                                       unsigned long count_field,
-                                       unsigned long addr_field)
+                                       u32 count, u64 addr)
 {
-        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
         int maxphyaddr;
-        u64 count, addr;
 
-        if (vmcs12_read_any(vmcs12, count_field, &count) ||
-            vmcs12_read_any(vmcs12, addr_field, &addr)) {
-                WARN_ON(1);
-                return -EINVAL;
-        }
         if (count == 0)
                 return 0;
         maxphyaddr = cpuid_maxphyaddr(vcpu);
         if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
-            (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
-                pr_debug_ratelimited(
-                        "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
-                        addr_field, maxphyaddr, count, addr);
+            (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
                 return -EINVAL;
-        }
+
         return 0;
 }
 
 static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12)
 {
-        if (vmcs12->vm_exit_msr_load_count == 0 &&
-            vmcs12->vm_exit_msr_store_count == 0 &&
-            vmcs12->vm_entry_msr_load_count == 0)
-                return 0; /* Fast path */
-        if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
-                                        VM_EXIT_MSR_LOAD_ADDR) ||
-            nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
-                                        VM_EXIT_MSR_STORE_ADDR) ||
-            nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
-                                        VM_ENTRY_MSR_LOAD_ADDR))
+        if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
+                                        vmcs12->vm_exit_msr_load_addr) ||
+            nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
+                                        vmcs12->vm_exit_msr_store_addr) ||
+            nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
+                                        vmcs12->vm_entry_msr_load_addr))
                 return -EINVAL;
+
         return 0;
 }
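For reference, a minimal user-space sketch of the address validation that
nested_vmx_check_msr_switch() performs. This is illustrative, not kernel
code: the function and macro names here are hypothetical, the 16-byte
entry size mirrors sizeof(struct vmx_msr_entry), and the 48-bit
MAXPHYADDR is just an example value.

#include <stdint.h>
#include <stdio.h>

#define MSR_ENTRY_SIZE 16u      /* sizeof(struct vmx_msr_entry) */

static int check_msr_switch_area(uint32_t count, uint64_t addr, int maxphyaddr)
{
        if (count == 0)         /* no MSRs to switch, nothing to validate */
                return 0;
        if (addr & 15)          /* area must be 16-byte aligned */
                return -1;
        if (addr >> maxphyaddr) /* base must sit below guest MAXPHYADDR */
                return -1;
        /* ...and so must the last byte of the area */
        if ((addr + (uint64_t)count * MSR_ENTRY_SIZE - 1) >> maxphyaddr)
                return -1;
        return 0;
}

int main(void)
{
        /* Assume a 48-bit guest physical address width. */
        printf("%d\n", check_msr_switch_area(2, 0x1000, 48));     /* 0: valid */
        printf("%d\n", check_msr_switch_area(2, 0x1008, 48));     /* -1: misaligned */
        printf("%d\n", check_msr_switch_area(1, 1ull << 48, 48)); /* -1: beyond MAXPHYADDR */
        return 0;
}

Note the "- 1" in the last check: an area whose final byte lands exactly
on the MAXPHYADDR boundary is legal, while one that crosses it is not.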