Commit 123f42f0 authored by Oliver Upton

Merge branch kvm-arm64/pmu_pmcr_n into kvmarm/next

* kvm-arm64/pmu_pmcr_n:
  : User-defined PMC limit, courtesy Raghavendra Rao Ananta
  :
  : Certain VMMs may want to reserve some PMCs for host use while running a
  : KVM guest. This was a bit difficult before, as KVM advertised all
  : supported counters to the guest. Userspace can now limit the number of
  : advertised PMCs by writing to PMCR_EL0.N, as KVM's sysreg and PMU
  : emulation enforce the specified limit for handling guest accesses.
  KVM: selftests: aarch64: vPMU test for validating user accesses
  KVM: selftests: aarch64: vPMU register test for unimplemented counters
  KVM: selftests: aarch64: vPMU register test for implemented counters
  KVM: selftests: aarch64: Introduce vpmu_counter_access test
  tools: Import arm_pmuv3.h
  KVM: arm64: PMU: Allow userspace to limit PMCR_EL0.N for the guest
  KVM: arm64: Sanitize PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR} before first run
  KVM: arm64: Add {get,set}_user for PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR}
  KVM: arm64: PMU: Set PMCR_EL0.N for vCPU based on the associated PMU
  KVM: arm64: PMU: Add a helper to read a vCPU's PMCR_EL0
  KVM: arm64: Select default PMU in KVM_ARM_VCPU_INIT handler
  KVM: arm64: PMU: Introduce helpers to set the guest's PMU
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parents 53ce49ea 62708be3
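
For readers wanting to see what the new UAPI enables: a VMM could cap the counter count advertised to the guest by rewriting PMCR_EL0.N through the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls on an initialized vCPU, before the VM first runs. The sketch below is illustrative and not part of this series; the helper name limit_guest_pmcs() and the vcpu_fd variable are made up, and error handling is minimal. PMCR_EL0 is sys-reg (op0=3, op1=3, CRn=9, CRm=12, op2=0) and its N field sits in bits [15:11].

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>		/* pulls in the arm64 uapi ARM64_SYS_REG() */

	#define PMCR_EL0_ID	ARM64_SYS_REG(3, 3, 9, 12, 0)	/* op0,op1,CRn,CRm,op2 */
	#define PMCR_N_SHIFT	11
	#define PMCR_N_MASK	0x1f

	/* Illustrative helper: cap PMCR_EL0.N before the first KVM_RUN. */
	static int limit_guest_pmcs(int vcpu_fd, uint8_t nr_counters)
	{
		uint64_t pmcr;
		struct kvm_one_reg reg = {
			.id   = PMCR_EL0_ID,
			.addr = (uint64_t)(unsigned long)&pmcr,
		};

		/* The reset value advertises the full hardware counter count. */
		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
			return -1;

		/* Replace PMCR_EL0.N with the smaller, userspace-chosen limit. */
		pmcr &= ~((uint64_t)PMCR_N_MASK << PMCR_N_SHIFT);
		pmcr |= (uint64_t)(nr_counters & PMCR_N_MASK) << PMCR_N_SHIFT;

		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}

KVM only honours the smaller N until the VM has run once, and never above the hardware limit; see set_pmcr() in the sys_regs.c hunk below.
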
@@ -290,6 +290,9 @@ struct kvm_arch {
 	cpumask_var_t supported_cpus;
 
+	/* PMCR_EL0.N value for the guest */
+	u8 pmcr_n;
+
 	/* Hypercall features firmware registers' descriptor */
 	struct kvm_smccc_features smccc_feat;
 	struct maple_tree smccc_filter;
......
@@ -857,8 +857,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
 		}
 
 		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
-			kvm_pmu_handle_pmcr(vcpu,
-					    __vcpu_sys_reg(vcpu, PMCR_EL0));
+			kvm_vcpu_reload_pmu(vcpu);
 
 		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
 			kvm_vcpu_pmu_restore_guest(vcpu);
@@ -1319,6 +1318,21 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
 			     KVM_VCPU_MAX_FEATURES);
 }
 
+static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	int ret = 0;
+
+	/*
+	 * When the vCPU has a PMU, but no PMU is set for the guest
+	 * yet, set the default one.
+	 */
+	if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
+		ret = kvm_arm_set_default_pmu(kvm);
+
+	return ret;
+}
+
 static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 				 const struct kvm_vcpu_init *init)
 {
@@ -1334,6 +1348,10 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 
 	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
 
+	ret = kvm_setup_vcpu(vcpu);
+	if (ret)
+		goto out_unlock;
+
 	/* Now we know what it is, we can reset it. */
 	kvm_reset_vcpu(vcpu);
......
@@ -89,7 +89,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
 
 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
 {
-	u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
+	u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
 
 	return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
 	       (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
@@ -267,7 +267,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
-	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+	u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
 
 	val &= ARMV8_PMU_PMCR_N_MASK;
 	if (val == 0)
@@ -289,7 +289,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
 		return;
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -341,7 +341,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
 	u64 reg = 0;
 
-	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
+	if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
@@ -443,7 +443,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 {
 	int i;
 
-	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
 		return;
 
 	/* Weed out disabled counters */
@@ -586,7 +586,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
 {
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 
-	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
+	return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
 }
@@ -735,10 +735,9 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
	 * It is still necessary to get a valid cpu, though, to probe for the
	 * default PMU instance as userspace is not required to specify a PMU
	 * type. In order to uphold the preexisting behavior KVM selects the
-	 * PMU instance for the core where the first call to the
-	 * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case
-	 * would be a user with disdain of all things big.LITTLE that affines
-	 * the VMM to a particular cluster of cores.
+	 * PMU instance for the core during vcpu init. A dependent use
+	 * case would be a user with disdain of all things big.LITTLE that
+	 * affines the VMM to a particular cluster of cores.
	 *
	 * In any case, userspace should just do the sane thing and use the UAPI
	 * to select a PMU type directly. But, be wary of the baggage being
@@ -804,6 +803,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	return val & mask;
 }
 
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
+{
+	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+	kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
+
+	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
+	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
+	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+}
+
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_vcpu_has_pmu(vcpu))
@@ -892,6 +902,52 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 	return true;
 }
 
+/**
+ * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters.
+ * @kvm: The kvm pointer
+ */
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+	struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
+
+	/*
+	 * The arm_pmu->num_events considers the cycle counter as well.
+	 * Ignore that and return only the general-purpose counters.
+	 */
+	return arm_pmu->num_events - 1;
+}
+
+static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
+{
+	lockdep_assert_held(&kvm->arch.config_lock);
+
+	kvm->arch.arm_pmu = arm_pmu;
+	kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
+}
+
+/**
+ * kvm_arm_set_default_pmu - No PMU set, get the default one.
+ * @kvm: The kvm pointer
+ *
+ * The observant among you will notice that the supported_cpus
+ * mask does not get updated for the default PMU even though it
+ * is quite possible the selected instance supports only a
+ * subset of cores in the system. This is intentional, and
+ * upholds the preexisting behavior on heterogeneous systems
+ * where vCPUs can be scheduled on any core but the guest
+ * counters could stop working.
+ */
+int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
+
+	if (!arm_pmu)
+		return -ENODEV;
+
+	kvm_arm_set_pmu(kvm, arm_pmu);
+	return 0;
+}
+
 static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -911,7 +967,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
 			break;
 		}
 
-		kvm->arch.arm_pmu = arm_pmu;
+		kvm_arm_set_pmu(kvm, arm_pmu);
 		cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
 		ret = 0;
 		break;
@@ -934,23 +990,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	if (vcpu->arch.pmu.created)
 		return -EBUSY;
 
-	if (!kvm->arch.arm_pmu) {
-		/*
-		 * No PMU set, get the default one.
-		 *
-		 * The observant among you will notice that the supported_cpus
-		 * mask does not get updated for the default PMU even though it
-		 * is quite possible the selected instance supports only a
-		 * subset of cores in the system. This is intentional, and
-		 * upholds the preexisting behavior on heterogeneous systems
-		 * where vCPUs can be scheduled on any core but the guest
-		 * counters could stop working.
-		 */
-		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
-		if (!kvm->arch.arm_pmu)
-			return -ENODEV;
-	}
-
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
 		int __user *uaddr = (int __user *)(long)attr->addr;
@@ -1090,3 +1129,15 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
 				      ID_AA64DFR0_EL1_PMUVer_V3P5);
 	return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
 }
+
+/**
+ * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
+ * @vcpu: The vcpu pointer
+ */
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
+			~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+
+	return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+}
@@ -719,14 +719,9 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
 
 static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);
-
-	/* No PMU available, any PMU reg may UNDEF... */
-	if (!kvm_arm_support_pmu_v3())
-		return 0;
-
-	n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
-	n &= ARMV8_PMU_PMCR_N_MASK;
+	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
+	u8 n = vcpu->kvm->arch.pmcr_n;
+
 	if (n)
 		mask |= GENMASK(n - 1, 0);
@@ -766,17 +761,15 @@ static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 
 static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
-	u64 pmcr;
-
-	/* No PMU available, PMCR_EL0 may UNDEF... */
-	if (!kvm_arm_support_pmu_v3())
-		return 0;
-
-	/* Only preserve PMCR_EL0.N, and reset the rest to 0 */
-	pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+	u64 pmcr = 0;
+
 	if (!kvm_supports_32bit_el0())
 		pmcr |= ARMV8_PMU_PMCR_LC;
 
+	/*
+	 * The value of PMCR.N field is included when the
+	 * vCPU register is read via kvm_vcpu_read_pmcr().
+	 */
 	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
 
 	return __vcpu_sys_reg(vcpu, r->reg);
@@ -826,7 +819,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
-		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
+		val = kvm_vcpu_read_pmcr(vcpu);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
@@ -834,7 +827,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
-		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
+		val = kvm_vcpu_read_pmcr(vcpu)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}
@@ -883,7 +876,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
 {
 	u64 pmcr, val;
 
-	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
+	pmcr = kvm_vcpu_read_pmcr(vcpu);
 	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
 		kvm_inject_undefined(vcpu);
@@ -998,6 +991,39 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }
 
+static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
+{
+	bool set;
+
+	val &= kvm_pmu_valid_counter_mask(vcpu);
+
+	switch (r->reg) {
+	case PMOVSSET_EL0:
+		/* CRm[1] being set indicates a SET register, and CLR otherwise */
+		set = r->CRm & 2;
+		break;
+	default:
+		/* Op2[0] being set indicates a SET register, and CLR otherwise */
+		set = r->Op2 & 1;
+		break;
+	}
+
+	if (set)
+		__vcpu_sys_reg(vcpu, r->reg) |= val;
+	else
+		__vcpu_sys_reg(vcpu, r->reg) &= ~val;
+
+	return 0;
+}
+
+static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
+{
+	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
+	return 0;
+}
+
 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
 {
@@ -1107,6 +1133,51 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }
 
+static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+		    u64 *val)
+{
+	*val = kvm_vcpu_read_pmcr(vcpu);
+	return 0;
+}
+
+static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+		    u64 val)
+{
+	u8 new_n = (val >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+	struct kvm *kvm = vcpu->kvm;
+
+	mutex_lock(&kvm->arch.config_lock);
+
+	/*
+	 * The vCPU can't have more counters than the PMU hardware
+	 * implements. Ignore this error to maintain compatibility
+	 * with the existing KVM behavior.
+	 */
+	if (!kvm_vm_has_ran_once(kvm) &&
+	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
+		kvm->arch.pmcr_n = new_n;
+
+	mutex_unlock(&kvm->arch.config_lock);
+
+	/*
+	 * Ignore writes to RES0 bits, read only bits that are cleared on
+	 * vCPU reset, and writable bits that KVM doesn't support yet.
+	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
+	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
+	 * But, we leave the bit as it is here, as the vCPU's PMUver might
+	 * be changed later (NOTE: the bit will be cleared on first vCPU run
+	 * if necessary).
+	 */
+	val &= ARMV8_PMU_PMCR_MASK;
+
+	/* The LC bit is RES1 when AArch32 is not supported */
+	if (!kvm_supports_32bit_el0())
+		val |= ARMV8_PMU_PMCR_LC;
+
+	__vcpu_sys_reg(vcpu, r->reg) = val;
+
+	return 0;
+}
+
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
@@ -2188,9 +2259,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	/* PMBIDR_EL1 is not trapped */
 
	{ PMU_SYS_REG(PMINTENSET_EL1),
-	  .access = access_pminten, .reg = PMINTENSET_EL1 },
+	  .access = access_pminten, .reg = PMINTENSET_EL1,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ PMU_SYS_REG(PMINTENCLR_EL1),
-	  .access = access_pminten, .reg = PMINTENSET_EL1 },
+	  .access = access_pminten, .reg = PMINTENSET_EL1,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
 
	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
@@ -2238,14 +2311,17 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
	{ SYS_DESC(SYS_SVCR), undef_access },
 
-	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr,
-	  .reset = reset_pmcr, .reg = PMCR_EL0 },
+	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
+	  .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
	{ PMU_SYS_REG(PMCNTENSET_EL0),
-	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ PMU_SYS_REG(PMCNTENCLR_EL0),
-	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
	{ PMU_SYS_REG(PMOVSCLR_EL0),
-	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
+	  .access = access_pmovs, .reg = PMOVSSET_EL0,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
	/*
	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
	 * previously (and pointlessly) advertised in the past...
@@ -2273,7 +2349,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
	{ PMU_SYS_REG(PMOVSSET_EL0),
-	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
+	  .access = access_pmovs, .reg = PMOVSSET_EL0,
+	  .get_user = get_pmreg, .set_user = set_pmreg },
 
	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
......
@@ -13,7 +13,6 @@
 #define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
 
 #if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
-
 struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
@@ -63,6 +62,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
@@ -102,7 +102,10 @@ void kvm_vcpu_pmu_resync_el0(void);
 u8 kvm_arm_pmu_get_pmuver_limit(void);
 u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
+int kvm_arm_set_default_pmu(struct kvm *kvm);
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
 
 #else
 struct kvm_pmu {
 };
@@ -169,6 +172,7 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
 static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
 {
	return 0;
@@ -179,6 +183,21 @@ static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
 }
 
 static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+	return -ENODEV;
+}
+
+static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
 
 #endif
 
 #endif
@@ -159,6 +159,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
 TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
+TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
 TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
 TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
......
@@ -104,6 +104,7 @@ enum {
 #define ESR_EC_SHIFT		26
 #define ESR_EC_MASK		(ESR_EC_NUM - 1)
 
+#define ESR_EC_UNKNOWN		0x0
 #define ESR_EC_SVC64		0x15
 #define ESR_EC_IABT		0x21
 #define ESR_EC_DABT		0x25
......
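
The ESR_EC_UNKNOWN define above supports the (collapsed) vpmu_counter_access test: per pmu_counter_idx_valid() earlier in this diff, a guest access to an event counter at or beyond the limited PMCR_EL0.N gets an injected UNDEF, which the guest's exception handler observes as EC == 0 (unknown reason). The snippet below is a rough, hypothetical guest-side probe rather than the selftest's actual code; the helper name try_unimplemented_counter() is made up.

	#include <stdint.h>

	#define ESR_EC_UNKNOWN	0x0	/* same value as the selftest define above */

	/*
	 * Hypothetical guest-side probe: select event counter 'idx' via
	 * PMSELR_EL0 and read PMXEVCNTR_EL0. For idx >= PMCR_EL0.N (and not
	 * the cycle counter), KVM injects an UNDEF, which the guest handler
	 * should see with ESR_EL1.EC == ESR_EC_UNKNOWN.
	 */
	static inline uint64_t try_unimplemented_counter(unsigned int idx)
	{
		uint64_t val;

		asm volatile("msr pmselr_el0, %0" : : "r"((uint64_t)idx));
		asm volatile("isb");
		asm volatile("mrs %0, pmxevcntr_el0" : "=r"(val));

		return val;
	}
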