Commit 7ff7dfe9 authored by Oliver Upton

Merge branch kvm-arm64/pmevtyper-filter into kvmarm/next

* kvm-arm64/pmevtyper-filter:
  : Fixes to KVM's handling of the PMUv3 exception level filtering bits
  :
  :  - NSH (count at EL2) and M (count at EL3) should be stateful when the
  :    respective EL is advertised in the ID registers but have no effect on
  :    event counting.
  :
  :  - NSU and NSK modify the event filtering of EL0 and EL1, respectively.
  :    Though the kernel may not use these bits, other KVM guests might.
  :    Implement these bits exactly as written in the pseudocode if EL3 is
  :    advertised (see the sketch just before the first hunk below).
  KVM: arm64: Add PMU event filter bits required if EL3 is implemented
  KVM: arm64: Make PMEVTYPER<n>_EL0.NSH RES0 if EL2 isn't advertised
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parents d47dcb67 ae8d3522
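
To make the second bullet concrete, here is a minimal, standalone C sketch of the filtering rule the pseudocode describes: with EL3 implemented, a Non-secure exception level has its events counted only while the NS bit for that level agrees with the level's base exclude bit. Illustration only, not kernel code: the ARMV8_PMU_* values are copied from the arm_pmuv3.h hunk at the end of this diff, while the helper functions and the main() driver are hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_EXCLUDE_EL1		(1U << 31)
#define ARMV8_PMU_EXCLUDE_EL0		(1U << 30)
#define ARMV8_PMU_EXCLUDE_NS_EL1	(1U << 29)
#define ARMV8_PMU_EXCLUDE_NS_EL0	(1U << 28)

/* Would an event be counted at Non-secure EL0? (hypothetical helper) */
static bool counted_at_ns_el0(uint32_t evtyper, bool have_el3)
{
	bool u   = evtyper & ARMV8_PMU_EXCLUDE_EL0;
	bool nsu = evtyper & ARMV8_PMU_EXCLUDE_NS_EL0;

	if (!have_el3)		/* NSU is RES0 without EL3; U alone decides */
		return !u;
	return u == nsu;	/* disagreement means "do not count" */
}

/* Would an event be counted at Non-secure EL1? (hypothetical helper) */
static bool counted_at_ns_el1(uint32_t evtyper, bool have_el3)
{
	bool p   = evtyper & ARMV8_PMU_EXCLUDE_EL1;
	bool nsk = evtyper & ARMV8_PMU_EXCLUDE_NS_EL1;

	if (!have_el3)
		return !p;
	return p == nsk;
}

int main(void)
{
	/* NSU set on its own only suppresses EL0 counting when EL3 exists. */
	uint32_t evtyper = ARMV8_PMU_EXCLUDE_NS_EL0;

	printf("NS EL0 counted, no EL3:   %d\n", counted_at_ns_el0(evtyper, false)); /* 1 */
	printf("NS EL0 counted, with EL3: %d\n", counted_at_ns_el0(evtyper, true));  /* 0 */
	printf("NS EL1 counted, with EL3: %d\n", counted_at_ns_el1(evtyper, true));  /* 1 */
	return 0;
}

This mismatch rule is what the (u != nsu) and (p != nsk) rewrites in kvm_pmu_create_perf_event() below implement when mapping the guest's filter to a perf_event_attr.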
@@ -60,6 +60,23 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	return __kvm_pmu_event_mask(pmuver);
 }
 
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+	u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
+		   kvm_pmu_event_mask(kvm);
+	u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
+
+	if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
+		mask |= ARMV8_PMU_INCLUDE_EL2;
+
+	if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0))
+		mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
+			ARMV8_PMU_EXCLUDE_NS_EL1 |
+			ARMV8_PMU_EXCLUDE_EL3;
+
+	return mask;
+}
+
 /**
  * kvm_pmc_is_64bit - determine if counter is 64bit
  * @pmc: counter context
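
For orientation before the next hunks: combining the helper above with the bit definitions in the arm_pmuv3.h hunk at the end of this diff, the guest-writable PMEVTYPER<n>_EL0 filter bits per configuration work out as follows (every configuration additionally gets the event-select field from kvm_pmu_event_mask()):

    ID registers advertise    Writable filter bits
    ----------------------    --------------------------------
    neither EL2 nor EL3       P (31), U (30)
    EL2 only                  P, U, NSH (27)
    EL3 only                  P, U, NSK (29), NSU (28), M (26)
    EL2 and EL3               all of the above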
@@ -584,6 +601,7 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 	struct perf_event *event;
 	struct perf_event_attr attr;
 	u64 eventsel, reg, data;
+	bool p, u, nsk, nsu;
 
 	reg = counter_index_to_evtreg(pmc->idx);
 	data = __vcpu_sys_reg(vcpu, reg);
@@ -610,13 +628,18 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
 	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
 		return;
 
+	p = data & ARMV8_PMU_EXCLUDE_EL1;
+	u = data & ARMV8_PMU_EXCLUDE_EL0;
+	nsk = data & ARMV8_PMU_EXCLUDE_NS_EL1;
+	nsu = data & ARMV8_PMU_EXCLUDE_NS_EL0;
+
 	memset(&attr, 0, sizeof(struct perf_event_attr));
 	attr.type = arm_pmu->pmu.type;
 	attr.size = sizeof(attr);
 	attr.pinned = 1;
 	attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
-	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
-	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
+	attr.exclude_user = (u != nsu);
+	attr.exclude_kernel = (p != nsk);
 	attr.exclude_hv = 1; /* Don't count EL2 events */
 	attr.exclude_host = 1; /* Don't count host events */
 	attr.config = eventsel;
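
The rewritten exclude expressions encode the Arm ARM rule that, with EL3 implemented, a Non-secure exception level is counted only while its NS bit agrees with the base exclude bit for that level. For attr.exclude_user (attr.exclude_kernel is analogous with P/NSK):

    U  NSU    attr.exclude_user
    0  0      0  (EL0 counted)
    1  1      0  (EL0 counted)
    1  0      1
    0  1      1

Because kvm_pmu_evtyper_mask() keeps NSU/NSK as zero unless EL3 is advertised, guests without EL3 retain the old behaviour where U and P alone decide.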
@@ -657,18 +680,13 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 				    u64 select_idx)
 {
 	struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
-	u64 reg, mask;
+	u64 reg;
 
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	mask  = ARMV8_PMU_EVTYPE_MASK;
-	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
-	mask |= kvm_pmu_event_mask(vcpu->kvm);
-
 	reg = counter_index_to_evtreg(pmc->idx);
-
-	__vcpu_sys_reg(vcpu, reg) = data & mask;
+	__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
 
 	kvm_pmu_create_perf_event(pmc);
 }
......
@@ -746,8 +746,12 @@ static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 
 static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
+	/* This thing will UNDEF, who cares about the reset value? */
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
+	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
 
@@ -988,7 +992,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
 		kvm_vcpu_pmu_restore_guest(vcpu);
 	} else {
-		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+		p->regval = __vcpu_sys_reg(vcpu, reg);
 	}
 
 	return true;
......
@@ -101,6 +101,7 @@ void kvm_vcpu_pmu_resync_el0(void);
 })
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
 
 #else
 struct kvm_pmu {
@@ -172,6 +173,10 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
 {
 	return 0;
 }
+static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+	return 0;
+}
 static inline void kvm_vcpu_pmu_resync_el0(void) {}
 
 #endif
......
@@ -234,9 +234,12 @@
 /*
  * Event filters for PMUv3
  */
-#define ARMV8_PMU_EXCLUDE_EL1	(1U << 31)
-#define ARMV8_PMU_EXCLUDE_EL0	(1U << 30)
-#define ARMV8_PMU_INCLUDE_EL2	(1U << 27)
+#define ARMV8_PMU_EXCLUDE_EL1		(1U << 31)
+#define ARMV8_PMU_EXCLUDE_EL0		(1U << 30)
+#define ARMV8_PMU_EXCLUDE_NS_EL1	(1U << 29)
+#define ARMV8_PMU_EXCLUDE_NS_EL0	(1U << 28)
+#define ARMV8_PMU_INCLUDE_EL2		(1U << 27)
+#define ARMV8_PMU_EXCLUDE_EL3		(1U << 26)
 
 /*
  * PMUSERENR: user enable reg
......
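
For quick reference, the PMEVTYPER<n>_EL0 filter bits now modelled by KVM decode as: bit 31 P (EXCLUDE_EL1), bit 30 U (EXCLUDE_EL0), bit 29 NSK (EXCLUDE_NS_EL1), bit 28 NSU (EXCLUDE_NS_EL0), bit 27 NSH (INCLUDE_EL2), bit 26 M (EXCLUDE_EL3).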