Commit 9aa4f622 authored by Like Xu, committed by Paolo Bonzini

KVM: vmx/pmu: Release guest LBR event via lazy release mechanism

The vPMU uses GUEST_LBR_IN_USE_IDX (bit 58) in 'pmu->pmc_in_use' to
indicate whether a guest LBR event is still needed by the vcpu. If the
vcpu no longer accesses the LBR-related registers within a scheduling
time slice and the LBR enable bit has been cleared, the vPMU treats the
guest LBR event like any other vPMC counter event and releases it as
usual. The pass-through state of the LBR record MSRs is also cancelled.
A simplified sketch of this flow follows below.
Signed-off-by: Like Xu <like.xu@linux.intel.com>
Message-Id: <20210201051039.255478-10-like.xu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e6209a3b
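
A small, self-contained user-space sketch of the lazy release idea described in the
commit message. It is illustrative only: the struct and function names (vpmu,
vlbr_mark_in_use, vpmu_sched_in_cleanup) are invented for the example and are not the
kernel's symbols; the real logic lives in kvm_pmu_cleanup(), the in-use-bit updates in
pmu_intel.c, and the new intel_pmu_cleanup() callback shown in the diff below.

/*
 * Hedged sketch of the lazy LBR release mechanism. Names are illustrative
 * stand-ins, not the kernel's actual symbols.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLBR_IDX           58              /* mirrors INTEL_PMC_IDX_FIXED_VLBR (bit 58) */
#define DEBUGCTLMSR_LBR    (1ULL << 0)     /* LBR enable bit in IA32_DEBUGCTL           */

struct vpmu {
	uint64_t pmc_in_use;       /* bitmap: which counters were touched this slice */
	uint64_t guest_debugctl;   /* guest's IA32_DEBUGCTL value                     */
	bool     lbr_event;        /* stands in for lbr_desc->event != NULL           */
};

/* Called whenever the guest reads or writes an LBR record MSR. */
static void vlbr_mark_in_use(struct vpmu *pmu)
{
	pmu->pmc_in_use |= 1ULL << VLBR_IDX;
}

/*
 * Called once per scheduling time slice (the kernel does this from
 * kvm_pmu_cleanup()). The vLBR event is released only if the guest neither
 * touched the LBR MSRs during the slice nor still has LBR enabled in
 * DEBUGCTL; otherwise it survives. The in-use bitmap then starts fresh.
 */
static void vpmu_sched_in_cleanup(struct vpmu *pmu)
{
	bool lbr_idle = !(pmu->pmc_in_use & (1ULL << VLBR_IDX));
	bool lbr_off  = !(pmu->guest_debugctl & DEBUGCTLMSR_LBR);

	if (pmu->lbr_event && lbr_idle && lbr_off) {
		pmu->lbr_event = false;    /* perf_event_release_kernel() in the real code */
		printf("guest LBR event released lazily\n");
	}

	pmu->pmc_in_use = 0;               /* clean slate for the next slice */
}

int main(void)
{
	struct vpmu pmu = { .lbr_event = true, .guest_debugctl = DEBUGCTLMSR_LBR };

	/* Slice 1: guest touches LBR MSRs and keeps LBR enabled -> event survives. */
	vlbr_mark_in_use(&pmu);
	vpmu_sched_in_cleanup(&pmu);

	/* Slice 2: no LBR accesses and LBR disabled -> event is released. */
	pmu.guest_debugctl = 0;
	vpmu_sched_in_cleanup(&pmu);

	return 0;
}

Running the sketch prints the release message only for the second slice, mirroring the
behaviour the commit describes: the guest LBR event stays alive as long as it is either
being accessed or LBR is still enabled in the guest's DEBUGCTL.
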
@@ -476,6 +476,9 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 			pmc_stop_counter(pmc);
 	}
 
+	if (kvm_x86_ops.pmu_ops->cleanup)
+		kvm_x86_ops.pmu_ops->cleanup(vcpu);
+
 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }
@@ -40,6 +40,7 @@ struct kvm_pmu_ops {
 	void (*init)(struct kvm_vcpu *vcpu);
 	void (*reset)(struct kvm_vcpu *vcpu);
 	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
+	void (*cleanup)(struct kvm_vcpu *vcpu);
 };
 
 static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
@@ -288,8 +288,10 @@ int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
 		PERF_SAMPLE_BRANCH_USER,
 	};
 
-	if (unlikely(lbr_desc->event))
+	if (unlikely(lbr_desc->event)) {
+		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
 		return 0;
+	}
 
 	event = perf_event_create_kernel_counter(&attr, -1,
 						current, NULL, NULL);
@@ -300,6 +302,7 @@ int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
 	}
 
 	lbr_desc->event = event;
 	pmu->event_count++;
+	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
 	return 0;
 }
@@ -332,9 +335,11 @@ static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
 			rdmsrl(index, msr_info->data);
 		else
 			wrmsrl(index, msr_info->data);
+		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
 		local_irq_enable();
 		return true;
 	}
+	clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
 	local_irq_enable();
 
 dummy:
@@ -532,6 +537,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		x86_perf_get_lbr(&lbr_desc->records);
 	else
 		lbr_desc->records.nr = 0;
+
+	if (lbr_desc->records.nr)
+		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
 }
 
 static void intel_pmu_init(struct kvm_vcpu *vcpu)
@@ -665,17 +673,21 @@ static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
  */
 void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 {
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 
 	if (!lbr_desc->event) {
 		vmx_disable_lbr_msrs_passthrough(vcpu);
 		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
 			goto warn;
+		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
+			goto warn;
 		return;
 	}
 
 	if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
 		vmx_disable_lbr_msrs_passthrough(vcpu);
+		__clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
 		goto warn;
 	} else
 		vmx_enable_lbr_msrs_passthrough(vcpu);
@@ -687,6 +699,12 @@ void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
 		vcpu->vcpu_id);
 }
 
+static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
+{
+	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+		intel_pmu_release_guest_lbr_event(vcpu);
+}
+
 struct kvm_pmu_ops intel_pmu_ops = {
 	.find_arch_event = intel_find_arch_event,
 	.find_fixed_event = intel_find_fixed_event,
@@ -702,4 +720,5 @@ struct kvm_pmu_ops intel_pmu_ops = {
 	.init = intel_pmu_init,
 	.reset = intel_pmu_reset,
 	.deliver_pmi = intel_pmu_deliver_pmi,
+	.cleanup = intel_pmu_cleanup,
 };