Commit 5fbf9634 authored by Krish Sadhukhan, committed by Paolo Bonzini

KVM: nVMX: Move the checks for VM-Entry Control Fields to a separate helper function

.. to improve readability and maintainability, and to align the code as per
the layout of the checks in chapter "VM Entries" in Intel SDM vol 3C.
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Mark Kanda <mark.kanda@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 61446ba7
@@ -726,7 +726,7 @@ static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
+static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
 						struct vmcs12 *vmcs12)
 {
 	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
@@ -2510,47 +2510,18 @@ static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
-					    struct vmcs12 *vmcs12)
+/*
+ * Checks related to VM-Entry Control Fields
+ */
+static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
+					  struct vmcs12 *vmcs12)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	bool ia32e;
-
-	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
-	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
-		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
-	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
-	    nested_check_vm_exit_controls(vcpu, vmcs12))
-		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
-	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
-		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
-	if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
-	    !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
-	    !nested_cr3_valid(vcpu, vmcs12->host_cr3))
-		return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
 
 	if (!vmx_control_verify(vmcs12->vm_entry_controls,
 				vmx->nested.msrs.entry_ctls_low,
 				vmx->nested.msrs.entry_ctls_high))
-		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
-	/*
-	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
-	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
-	 * the values of the LMA and LME bits in the field must each be that of
-	 * the host address-space size VM-exit control.
-	 */
-	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
-		ia32e = (vmcs12->vm_exit_controls &
-			 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
-		if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
-		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
-		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
-			return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
-	}
+		return -EINVAL;
 
 	/*
 	 * From the Intel SDM, volume 3:
@@ -2572,29 +2543,29 @@ static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
 		if (intr_type == INTR_TYPE_RESERVED ||
 		    (intr_type == INTR_TYPE_OTHER_EVENT &&
 		     !nested_cpu_supports_monitor_trap_flag(vcpu)))
-			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+			return -EINVAL;
 
 		/* VM-entry interruption-info field: vector */
 		if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
 		    (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
 		    (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
-			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+			return -EINVAL;
 
 		/* VM-entry interruption-info field: deliver error code */
 		should_have_error_code =
 			intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
 			x86_exception_has_error_code(vector);
 		if (has_error_code != should_have_error_code)
-			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+			return -EINVAL;
 
 		/* VM-entry exception error code */
 		if (has_error_code &&
 		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
-			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+			return -EINVAL;
 
 		/* VM-entry interruption-info field: reserved bits */
 		if (intr_info & INTR_INFO_RESVD_BITS_MASK)
-			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+			return -EINVAL;
 
 		/* VM-entry instruction length */
 		switch (intr_type) {
@@ -2604,8 +2575,48 @@ static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
 			if ((vmcs12->vm_entry_instruction_len > 15) ||
 			    (vmcs12->vm_entry_instruction_len == 0 &&
 			     !nested_cpu_has_zero_length_injection(vcpu)))
-				return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+				return -EINVAL;
 		}
 	}
 
+	if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
+					    struct vmcs12 *vmcs12)
+{
+	bool ia32e;
+
+	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
+	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
+		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
+	    nested_check_vm_exit_controls(vcpu, vmcs12) ||
+	    nested_check_vm_entry_controls(vcpu, vmcs12))
+		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+	if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
+	    !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
+	    !nested_cr3_valid(vcpu, vmcs12->host_cr3))
+		return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+
+	/*
+	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
+	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
+	 * the values of the LMA and LME bits in the field must each be that of
+	 * the host address-space size VM-exit control.
+	 */
+	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
+		ia32e = (vmcs12->vm_exit_controls &
+			 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
+		if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
+		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
+		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
+			return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+	}
+
 	return 0;
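Note on the return-value convention visible in this diff: the extracted helper nested_check_vm_entry_controls() reports failure with -EINVAL (like the other per-category check helpers), and nested_vmx_check_vmentry_prereqs() translates any helper failure into the architectural error VMXERR_ENTRY_INVALID_CONTROL_FIELD. The following is a minimal standalone sketch of that pattern, not kernel code: the struct, the placeholder validity test, and the function names prefixed with "check_" are simplified stand-ins for illustration only.

#include <errno.h>
#include <stdio.h>

/* Stand-in constant; 7 is the VMX VM-instruction error number for
 * "VM entry with invalid control field(s)". */
#define VMXERR_ENTRY_INVALID_CONTROL_FIELD 7

/* Simplified stand-in for struct vmcs12. */
struct vmcs12_stub {
	unsigned int vm_entry_controls;
};

/* Stand-in for nested_check_vm_entry_controls(): 0 on success, -EINVAL on failure. */
static int check_vm_entry_controls(const struct vmcs12_stub *vmcs12)
{
	if (vmcs12->vm_entry_controls == 0)	/* placeholder validity test */
		return -EINVAL;
	return 0;
}

/* Stand-in for nested_vmx_check_vmentry_prereqs(): maps a helper failure
 * to the architectural VMX error code returned to the caller. */
static int check_vmentry_prereqs(const struct vmcs12_stub *vmcs12)
{
	if (check_vm_entry_controls(vmcs12))
		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
	return 0;
}

int main(void)
{
	struct vmcs12_stub bad = { .vm_entry_controls = 0 };
	struct vmcs12_stub good = { .vm_entry_controls = 1 };

	printf("bad:  %d\n", check_vmentry_prereqs(&bad));	/* prints 7 */
	printf("good: %d\n", check_vmentry_prereqs(&good));	/* prints 0 */
	return 0;
}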