Commit ac6389ab authored by Sean Christopherson's avatar Sean Christopherson Committed by Paolo Bonzini

KVM: nVMX: Rename EPTP validity helper and associated variables

Rename valid_ept_address() to nested_vmx_check_eptp() to follow the nVMX
nomenclature and to reflect that the function now checks a lot more than
just the address contained in the EPTP.  Rename address to new_eptp in
associated code.

No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ac69dfaa
...@@ -2563,13 +2563,13 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) ...@@ -2563,13 +2563,13 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
return 0; return 0;
} }
static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
{ {
struct vcpu_vmx *vmx = to_vmx(vcpu); struct vcpu_vmx *vmx = to_vmx(vcpu);
int maxphyaddr = cpuid_maxphyaddr(vcpu); int maxphyaddr = cpuid_maxphyaddr(vcpu);
/* Check for memory type validity */ /* Check for memory type validity */
switch (address & VMX_EPTP_MT_MASK) { switch (new_eptp & VMX_EPTP_MT_MASK) {
case VMX_EPTP_MT_UC: case VMX_EPTP_MT_UC:
if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
return false; return false;
...@@ -2583,7 +2583,7 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) ...@@ -2583,7 +2583,7 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
} }
/* Page-walk levels validity. */ /* Page-walk levels validity. */
switch (address & VMX_EPTP_PWL_MASK) { switch (new_eptp & VMX_EPTP_PWL_MASK) {
case VMX_EPTP_PWL_5: case VMX_EPTP_PWL_5:
if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
return false; return false;
...@@ -2597,11 +2597,11 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address) ...@@ -2597,11 +2597,11 @@ static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
} }
/* Reserved bits should not be set */ /* Reserved bits should not be set */
if (CC(address >> maxphyaddr || ((address >> 7) & 0x1f))) if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f)))
return false; return false;
/* AD, if set, should be supported */ /* AD, if set, should be supported */
if (address & VMX_EPTP_AD_ENABLE_BIT) { if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
return false; return false;
} }
...@@ -2650,7 +2650,7 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, ...@@ -2650,7 +2650,7 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
return -EINVAL; return -EINVAL;
if (nested_cpu_has_ept(vmcs12) && if (nested_cpu_has_ept(vmcs12) &&
CC(!valid_ept_address(vcpu, vmcs12->ept_pointer))) CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
return -EINVAL; return -EINVAL;
if (nested_cpu_has_vmfunc(vmcs12)) { if (nested_cpu_has_vmfunc(vmcs12)) {
...@@ -5234,7 +5234,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, ...@@ -5234,7 +5234,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12) struct vmcs12 *vmcs12)
{ {
u32 index = kvm_rcx_read(vcpu); u32 index = kvm_rcx_read(vcpu);
u64 address; u64 new_eptp;
bool accessed_dirty; bool accessed_dirty;
struct kvm_mmu *mmu = vcpu->arch.walk_mmu; struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
...@@ -5247,23 +5247,23 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, ...@@ -5247,23 +5247,23 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
&address, index * 8, 8)) &new_eptp, index * 8, 8))
return 1; return 1;
accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT); accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
/* /*
* If the (L2) guest does a vmfunc to the currently * If the (L2) guest does a vmfunc to the currently
* active ept pointer, we don't have to do anything else * active ept pointer, we don't have to do anything else
*/ */
if (vmcs12->ept_pointer != address) { if (vmcs12->ept_pointer != new_eptp) {
if (!valid_ept_address(vcpu, address)) if (!nested_vmx_check_eptp(vcpu, new_eptp))
return 1; return 1;
kvm_mmu_unload(vcpu); kvm_mmu_unload(vcpu);
mmu->ept_ad = accessed_dirty; mmu->ept_ad = accessed_dirty;
mmu->mmu_role.base.ad_disabled = !accessed_dirty; mmu->mmu_role.base.ad_disabled = !accessed_dirty;
vmcs12->ept_pointer = address; vmcs12->ept_pointer = new_eptp;
/* /*
* TODO: Check what's the correct approach in case * TODO: Check what's the correct approach in case
* mmu reload fails. Currently, we just let the next * mmu reload fails. Currently, we just let the next
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment