Commit 46b18782 authored by Marc Zyngier

KVM: arm64: Keep a per-VM pointer to the default PMU

As we are about to allow selection of the PMU exposed to a guest, start by
keeping track of the default one instead of only the PMU version.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Link: https://lore.kernel.org/r/20220127161759.53553-4-alexandru.elisei@arm.com
parent 2093057a
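In brief: instead of caching only the PMU version in kvm->arch.pmuver, the VM now tracks a pointer to the host arm_pmu instance and derives the version from it on demand. As a reading aid, here is a minimal compilable sketch of that pattern; the structures are simplified stand-ins for the kernel's struct arm_pmu and struct kvm_arch, not their real definitions, and the values are arbitrary:

    #include <stdio.h>

    /* Illustrative stand-in; the kernel's struct arm_pmu carries far more. */
    struct arm_pmu {
            unsigned int pmuver;    /* PMU version reported by the host driver */
            unsigned int type;      /* perf event type, used instead of PERF_TYPE_RAW */
    };

    struct kvm_arch {
            /* before: unsigned int pmuver; (only the version was cached) */
            struct arm_pmu *arm_pmu;        /* after: the default PMU itself */
    };

    /* The version is now read through the tracked PMU when needed. */
    static unsigned int vm_pmuver(const struct kvm_arch *arch)
    {
            return arch->arm_pmu->pmuver;
    }

    int main(void)
    {
            struct arm_pmu host_pmu = { .pmuver = 4, .type = 8 }; /* example values */
            struct kvm_arch arch = { .arm_pmu = &host_pmu };

            printf("guest PMU version: %u\n", vm_pmuver(&arch));
            return 0;
    }

The diff below additionally switches guest counter creation from PERF_TYPE_RAW to the tracked PMU's own perf type (arm_pmu->pmu.type), and turns kvm_pmu_probe_pmuver() into kvm_pmu_probe_armpmu(), which returns the probed PMU itself (or NULL on failure).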
arch/arm64/include/asm/kvm_host.h
@@ -129,7 +129,7 @@ struct kvm_arch {
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
-	unsigned int pmuver;
+	struct arm_pmu *arm_pmu;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
arch/arm64/kvm/pmu-emul.c
@@ -24,7 +24,11 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);

 static u32 kvm_pmu_event_mask(struct kvm *kvm)
 {
-	switch (kvm->arch.pmuver) {
+	unsigned int pmuver;
+
+	pmuver = kvm->arch.arm_pmu->pmuver;
+
+	switch (pmuver) {
 	case ID_AA64DFR0_PMUVER_8_0:
 		return GENMASK(9, 0);
 	case ID_AA64DFR0_PMUVER_8_1:
@@ -33,7 +37,7 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	case ID_AA64DFR0_PMUVER_8_7:
 		return GENMASK(15, 0);
 	default:	/* Shouldn't be here, just for sanity */
-		WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
+		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
 		return 0;
 	}
 }
@@ -600,6 +604,7 @@ static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
  */
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 {
+	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 	struct perf_event *event;
@@ -636,7 +641,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 		return;

 	memset(&attr, 0, sizeof(struct perf_event_attr));
-	attr.type = PERF_TYPE_RAW;
+	attr.type = arm_pmu->pmu.type;
 	attr.size = sizeof(attr);
 	attr.pinned = 1;
 	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
@@ -750,12 +755,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 		static_branch_enable(&kvm_arm_pmu_available);
 }

-static int kvm_pmu_probe_pmuver(void)
+static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
 	struct perf_event_attr attr = { };
 	struct perf_event *event;
-	struct arm_pmu *pmu;
-	int pmuver = ID_AA64DFR0_PMUVER_IMP_DEF;
+	struct arm_pmu *pmu = NULL;

 	/*
 	 * Create a dummy event that only counts user cycles. As we'll never
@@ -780,19 +784,20 @@ static int kvm_pmu_probe_pmuver(void)
 	if (IS_ERR(event)) {
 		pr_err_once("kvm: pmu event creation failed %ld\n",
 			    PTR_ERR(event));
-		return ID_AA64DFR0_PMUVER_IMP_DEF;
+		return NULL;
 	}

 	if (event->pmu) {
 		pmu = to_arm_pmu(event->pmu);
-		if (pmu->pmuver)
-			pmuver = pmu->pmuver;
+		if (pmu->pmuver == 0 ||
+		    pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
+			pmu = NULL;
 	}

 	perf_event_disable(event);
 	perf_event_release_kernel(event);

-	return pmuver;
+	return pmu;
 }

 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
@@ -810,7 +815,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
 		 * as RAZ
 		 */
-		if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
+		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4)
 			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
 		base = 32;
 	}
@@ -932,11 +937,16 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	if (vcpu->arch.pmu.created)
 		return -EBUSY;

-	if (!vcpu->kvm->arch.pmuver)
-		vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();
-
-	if (vcpu->kvm->arch.pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
-		return -ENODEV;
+	mutex_lock(&kvm->lock);
+	if (!kvm->arch.arm_pmu) {
+		/* No PMU set, get the default one */
+		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
+		if (!kvm->arch.arm_pmu) {
+			mutex_unlock(&kvm->lock);
+			return -ENODEV;
+		}
+	}
+	mutex_unlock(&kvm->lock);

 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
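Note how the attribute path above now initialises the default PMU lazily under kvm->lock, so racing vCPU ioctls settle on a single arm_pmu. A compilable userspace sketch of that lazy-init-under-mutex shape follows; a pthreads mutex stands in for kvm->lock, and probe_default_pmu() is a hypothetical stand-in for kvm_pmu_probe_armpmu():

    #include <errno.h>
    #include <pthread.h>
    #include <stddef.h>

    struct arm_pmu { unsigned int pmuver; };

    struct vm {
            pthread_mutex_t lock;           /* stands in for kvm->lock */
            struct arm_pmu *arm_pmu;        /* set at most once per VM */
    };

    /* Hypothetical stand-in for kvm_pmu_probe_armpmu(); a real probe
     * can fail and return NULL. */
    static struct arm_pmu *probe_default_pmu(void)
    {
            static struct arm_pmu host_pmu = { .pmuver = 4 }; /* example value */
            return &host_pmu;
    }

    /* Mirrors the new logic in kvm_arm_pmu_v3_set_attr(): probe only if
     * no PMU is set yet, and fail with -ENODEV when probing finds none. */
    static int ensure_default_pmu(struct vm *vm)
    {
            int ret = 0;

            pthread_mutex_lock(&vm->lock);
            if (!vm->arm_pmu) {
                    vm->arm_pmu = probe_default_pmu();
                    if (!vm->arm_pmu)
                            ret = -ENODEV;
            }
            pthread_mutex_unlock(&vm->lock);

            return ret;
    }

    int main(void)
    {
            struct vm vm = { .lock = PTHREAD_MUTEX_INITIALIZER, .arm_pmu = NULL };
            return ensure_default_pmu(&vm) ? 1 : 0;
    }

Holding the lock across both the NULL check and the assignment is what guarantees the probe runs at most once per VM, even when several vCPU file descriptors set PMU_V3 device attributes concurrently.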