Commit 8e12911b authored by Like Xu, committed by Paolo Bonzini

KVM: vmx/pmu: Create a guest LBR event when vcpu sets DEBUGCTLMSR_LBR

When the vcpu sets DEBUGCTLMSR_LBR in MSR_IA32_DEBUGCTLMSR, the KVM handler
creates a guest LBR event that enables callstack mode and is assigned no
hardware counter. Host perf schedules and enables this event as usual, but
in an exclusive way.

For now, the guest LBR event is released when the vPMU is reset; soon, the
lazy release mechanism will be applied to this event just as it is to a vPMC.
Suggested-by: Andi Kleen <ak@linux.intel.com>
Co-developed-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Like Xu <like.xu@linux.intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Message-Id: <20210201051039.255478-6-like.xu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c6462363
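
For context, the guest-visible trigger for this path is a single MSR write. Below is a minimal guest-side sketch (assuming the standard Linux <asm/msr.h> helpers; guest_enable_lbr() is a hypothetical name, not code from this patch) of the write that traps to the vmx_set_msr() hunk further down and now creates the guest LBR event on first use:

/*
 * Illustrative guest-side sketch only: enabling LBR from the guest is just
 * a DEBUGCTL MSR write, which VM-exits to KVM's vmx_set_msr() handler.
 */
static void guest_enable_lbr(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl | DEBUGCTLMSR_LBR);
}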
@@ -224,6 +224,66 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
	return pmc;
}

static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->event) {
		perf_event_release_kernel(lbr_desc->event);
		lbr_desc->event = NULL;
		vcpu_to_pmu(vcpu)->event_count--;
	}
}

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct perf_event *event;

	/*
	 * The perf_event_attr is constructed in the minimal efficient way:
	 * - set 'pinned = true' to make it task pinned so that if another
	 *   cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
	 * - set '.exclude_host = true' to record only guest branch behavior;
	 *
	 * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicate that host perf
	 *   schedules the event without a real HW counter but a fake one;
	 *   check is_guest_lbr_event() and __intel_get_event_constraints();
	 *
	 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
	 *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
	 *   PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
	 *   event, which helps KVM save/restore guest LBR records
	 *   across host context switches and reduces quite a lot of overhead;
	 *   check branch_user_callstack() and intel_pmu_lbr_sched_task();
	 */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = INTEL_FIXED_VLBR_EVENT,
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.pinned = true,
		.exclude_host = true,
		.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
					PERF_SAMPLE_BRANCH_USER,
	};

	if (unlikely(lbr_desc->event))
		return 0;

	event = perf_event_create_kernel_counter(&attr, -1,
						current, NULL, NULL);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("%s: failed %ld\n",
					__func__, PTR_ERR(event));
		return -ENOENT;
	}
	lbr_desc->event = event;
	pmu->event_count++;
	return 0;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -435,6 +495,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
	lbr_desc->records.nr = 0;
	lbr_desc->event = NULL;
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)

@@ -459,6 +520,8 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;

	intel_pmu_release_guest_lbr_event(vcpu);
}

struct kvm_pmu_ops intel_pmu_ops = {
...
@@ -2028,6 +2028,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
			get_vmcs12(vcpu)->guest_ia32_debugctl = data;

		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
		if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event &&
		    (data & DEBUGCTLMSR_LBR))
			intel_pmu_create_guest_lbr_event(vcpu);
		return 0;
	}
	case MSR_IA32_BNDCFGS:
...
@@ -99,9 +99,19 @@ union vmx_exit_reason {
bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;
};

/*
...
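
Not part of this patch, but following the perf_event_attr comment above: since a reclaimed task-pinned event ends up with event->oncpu == -1, a later consumer of lbr_desc->event could gate the LBR passthrough path with a check along these lines (guest_lbr_event_is_scheduled() is a hypothetical helper name, sketch only):

static bool guest_lbr_event_is_scheduled(struct lbr_desc *lbr_desc)
{
	struct perf_event *event = lbr_desc->event;

	/* perf sets event->oncpu to -1 when another pinned event reclaims LBR. */
	return event && READ_ONCE(event->oncpu) != -1;
}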