Commit 2ef7619d authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Add helpers to query Intel PT mode

Add helpers to query which of the (two) supported PT modes is active.
The primary motivation is to help document that there is a third PT mode
(host-only) that's currently not supported by KVM.  As is, it's not
obvious that "pt_mode == PT_MODE_SYSTEM" is not simply the inverse of
"pt_mode == PT_MODE_HOST_GUEST", i.e. that "pt_mode == PT_MODE_SYSTEM"
and "pt_mode != PT_MODE_HOST_GUEST" are two distinct checks.

No functional change intended.
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0eee8f9d
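
Why the distinction matters, in a minimal sketch (illustrative only, not part of the patch): once a third mode is in the picture, the two raw comparisons select different sets of modes. The PT_MODE_HOST enumerator below is hypothetical; the kernel currently defines only PT_MODE_SYSTEM and PT_MODE_HOST_GUEST.

    enum pt_mode {
        PT_MODE_SYSTEM,     /* (a) system-wide tracing */
        PT_MODE_HOST,       /* (b) host-only; hypothetical, not defined by the kernel */
        PT_MODE_HOST_GUEST, /* (c) host and guest tracing */
    };

    /*
     * With three modes the two checks are no longer complements:
     *   pt_mode != PT_MODE_HOST_GUEST  is true for (a) and (b)
     *   pt_mode == PT_MODE_SYSTEM      is true for (a) only
     * The helpers added below make each call site state which check it means.
     */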
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -354,4 +354,22 @@ static inline bool cpu_has_vmx_intel_pt(void)
         (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_RTIT_CTL);
 }
 
+/*
+ * Processor Trace can operate in one of three modes:
+ *  a. system-wide: trace both host/guest and output to host buffer
+ *  b. host-only: only trace host and output to host buffer
+ *  c. host-guest: trace host and guest simultaneously and output to their
+ *     respective buffer
+ *
+ * KVM currently only supports (a) and (c).
+ */
+static inline bool vmx_pt_mode_is_system(void)
+{
+    return pt_mode == PT_MODE_SYSTEM;
+}
+
+static inline bool vmx_pt_mode_is_host_guest(void)
+{
+    return pt_mode == PT_MODE_HOST_GUEST;
+}
+
 #endif /* __KVM_X86_VMX_CAPS_H */
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4602,7 +4602,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
     vmx->nested.vmcs02_initialized = false;
     vmx->nested.vmxon = true;
 
-    if (pt_mode == PT_MODE_HOST_GUEST) {
+    if (vmx_pt_mode_is_host_guest()) {
         vmx->pt_desc.guest.ctl = 0;
         pt_update_intercept_for_msr(vmx);
     }
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1059,7 +1059,7 @@ static unsigned long segment_base(u16 selector)
 
 static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
 {
-    return (pt_mode == PT_MODE_HOST_GUEST) &&
+    return vmx_pt_mode_is_host_guest() &&
            !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
 }
 
@@ -1093,7 +1093,7 @@ static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
 
 static void pt_guest_enter(struct vcpu_vmx *vmx)
 {
-    if (pt_mode == PT_MODE_SYSTEM)
+    if (vmx_pt_mode_is_system())
         return;
 
     /*
@@ -1110,7 +1110,7 @@ static void pt_guest_enter(struct vcpu_vmx *vmx)
 
 static void pt_guest_exit(struct vcpu_vmx *vmx)
 {
-    if (pt_mode == PT_MODE_SYSTEM)
+    if (vmx_pt_mode_is_system())
         return;
 
     if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
@@ -1904,24 +1904,24 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        &msr_info->data);
         break;
     case MSR_IA32_RTIT_CTL:
-        if (pt_mode != PT_MODE_HOST_GUEST)
+        if (!vmx_pt_mode_is_host_guest())
             return 1;
         msr_info->data = vmx->pt_desc.guest.ctl;
         break;
     case MSR_IA32_RTIT_STATUS:
-        if (pt_mode != PT_MODE_HOST_GUEST)
+        if (!vmx_pt_mode_is_host_guest())
             return 1;
         msr_info->data = vmx->pt_desc.guest.status;
         break;
     case MSR_IA32_RTIT_CR3_MATCH:
-        if ((pt_mode != PT_MODE_HOST_GUEST) ||
+        if (!vmx_pt_mode_is_host_guest() ||
             !intel_pt_validate_cap(vmx->pt_desc.caps,
                                    PT_CAP_cr3_filtering))
             return 1;
         msr_info->data = vmx->pt_desc.guest.cr3_match;
         break;
     case MSR_IA32_RTIT_OUTPUT_BASE:
-        if ((pt_mode != PT_MODE_HOST_GUEST) ||
+        if (!vmx_pt_mode_is_host_guest() ||
             (!intel_pt_validate_cap(vmx->pt_desc.caps,
                                     PT_CAP_topa_output) &&
              !intel_pt_validate_cap(vmx->pt_desc.caps,
@@ -1930,7 +1930,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         msr_info->data = vmx->pt_desc.guest.output_base;
         break;
     case MSR_IA32_RTIT_OUTPUT_MASK:
-        if ((pt_mode != PT_MODE_HOST_GUEST) ||
+        if (!vmx_pt_mode_is_host_guest() ||
             (!intel_pt_validate_cap(vmx->pt_desc.caps,
                                     PT_CAP_topa_output) &&
              !intel_pt_validate_cap(vmx->pt_desc.caps,
@@ -1940,7 +1940,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         break;
     case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
         index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
-        if ((pt_mode != PT_MODE_HOST_GUEST) ||
+        if (!vmx_pt_mode_is_host_guest() ||
             (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
                                                 PT_CAP_num_address_ranges)))
             return 1;
@@ -2146,7 +2146,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
             return 1;
         return vmx_set_vmx_msr(vcpu, msr_index, data);
     case MSR_IA32_RTIT_CTL:
-        if ((pt_mode != PT_MODE_HOST_GUEST) ||
+        if (!vmx_pt_mode_is_host_guest() ||
             vmx_rtit_ctl_check(vcpu, data) ||
             vmx->nested.vmxon)
             return 1;
@@ -4023,7 +4023,7 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
     u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
 
-    if (pt_mode == PT_MODE_SYSTEM)
+    if (vmx_pt_mode_is_system())
         exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
     if (!cpu_need_virtualize_apic_accesses(vcpu))
         exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
@@ -4264,7 +4264,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
     if (cpu_has_vmx_encls_vmexit())
         vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
 
-    if (pt_mode == PT_MODE_HOST_GUEST) {
+    if (vmx_pt_mode_is_host_guest()) {
         memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
         /* Bit[6~0] are forced to 1, writes are ignored. */
         vmx->pt_desc.guest.output_mask = 0x7F;
@@ -6318,7 +6318,7 @@ static bool vmx_has_emulated_msr(int index)
 
 static bool vmx_pt_supported(void)
 {
-    return pt_mode == PT_MODE_HOST_GUEST;
+    return vmx_pt_mode_is_host_guest();
 }
 
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -452,7 +452,7 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
 static inline u32 vmx_vmentry_ctrl(void)
 {
     u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
-    if (pt_mode == PT_MODE_SYSTEM)
+    if (vmx_pt_mode_is_system())
         vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
                           VM_ENTRY_LOAD_IA32_RTIT_CTL);
     /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
@@ -463,7 +463,7 @@ static inline u32 vmx_vmentry_ctrl(void)
 static inline u32 vmx_vmexit_ctrl(void)
 {
     u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
-    if (pt_mode == PT_MODE_SYSTEM)
+    if (vmx_pt_mode_is_system())
         vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
                          VM_EXIT_CLEAR_IA32_RTIT_CTL);
     /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
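
If KVM ever gained support for the host-only mode, the helper pattern would extend naturally with its own predicate rather than a negated check. A hypothetical sketch (neither PT_MODE_HOST nor this helper exists in the kernel today):

    /* Hypothetical: (b) host-only mode, currently unsupported by KVM. */
    static inline bool vmx_pt_mode_is_host(void)
    {
        return pt_mode == PT_MODE_HOST;	/* hypothetical enumerator */
    }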