Commit 446ace4b authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Use vpid_sync_context() directly when possible

Use vpid_sync_context() directly for flows that run if and only if
enable_vpid=1, or more specifically, nested VMX flows that are gated by
vmx->nested.msrs.secondary_ctls_high.SECONDARY_EXEC_ENABLE_VPID being
set, which is allowed if and only if enable_vpid=1.  Because these flows
call __vmx_flush_tlb() with @invalidate_gpa=false, the if-statement that
decides between INVEPT and INVVPID will always go down the INVVPID path,
i.e. call vpid_sync_context(), because
"enable_ept && (invalidate_gpa || !enable_vpid)" always evaluates to false.

This helps pave the way toward removing @invalidate_gpa and @vpid from
__vmx_flush_tlb() and its callers.

Opportunistically drop unnecessary brackets in handle_invvpid() around an
affected __vmx_flush_tlb()->vpid_sync_context() conversion.
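
For context, vpid_sync_context() itself uses the narrowest INVVPID type
the CPU supports; roughly (again paraphrased, not a verbatim quote):

	static inline void vpid_sync_context(int vpid)
	{
		/* Prefer a single-context invalidation of @vpid... */
		if (cpu_has_vmx_invvpid_single())
			vpid_sync_vcpu_single(vpid);
		else if (vpid != 0)
			/*
			 * ...and fall back to a global flush, except for
			 * vpid==0, for which there is nothing to flush.
			 */
			vpid_sync_vcpu_global();
	}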

No functional change intended.
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-10-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c746b3a4
@@ -2459,7 +2459,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
 		if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
 			vmx->nested.last_vpid = vmcs12->virtual_processor_id;
-			__vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
+			vpid_sync_context(nested_get_vpid02(vcpu));
 		}
 	} else {
 		/*
@@ -5234,21 +5234,21 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		    is_noncanonical_address(operand.gla, vcpu))
 			return nested_vmx_failValid(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-		if (cpu_has_vmx_invvpid_individual_addr()) {
+		if (cpu_has_vmx_invvpid_individual_addr())
 			__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
 				vpid02, operand.gla);
-		} else
-			__vmx_flush_tlb(vcpu, vpid02, false);
+		else
+			vpid_sync_context(vpid02);
 		break;
 	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
 	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
 		if (!operand.vpid)
 			return nested_vmx_failValid(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-		__vmx_flush_tlb(vcpu, vpid02, false);
+		vpid_sync_context(vpid02);
 		break;
 	case VMX_VPID_EXTENT_ALL_CONTEXT:
-		__vmx_flush_tlb(vcpu, vpid02, false);
+		vpid_sync_context(vpid02);
 		break;
 	default:
 		WARN_ON_ONCE(1);