Commit 6e5d8c71 authored by Marc Zyngier

Merge branch 'kvm-arm64/pmu-undef' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 8c38602f 7521c3a9
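
The series merged below replaces the per-vcpu pmu.ready flag with a kvm_vcpu_has_pmu() check on the vcpu feature bits, refuses the PMUv3 feature at vcpu reset time when the host has no PMUv3, reports PMUVer as not implemented in ID_AA64DFR0_EL1 for vcpus without a PMU, and makes PMU system-register accesses inject an UNDEF into the guest instead of being handled as RAZ/WI when no PMU is configured. A minimal sketch of the handler shape this converges on (example_pmu_handler() is a made-up name; kvm_vcpu_has_pmu() and kvm_inject_undefined() come from the diff, and most handlers reach the check indirectly via check_pmu_access_disabled()):

	/* Illustrative only: the common shape of a PMU sysreg handler
	 * after this merge. Not a function taken from the diff itself.
	 */
	static bool example_pmu_handler(struct kvm_vcpu *vcpu,
					struct sys_reg_params *p,
					const struct sys_reg_desc *r)
	{
		/* No PMU configured for this vcpu: UNDEF instead of RAZ/WI */
		if (!kvm_vcpu_has_pmu(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		/* ... normal PMU register emulation ... */
		return true;
	}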
@@ -733,4 +733,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 #define kvm_arm_vcpu_sve_finalized(vcpu) \
 	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
 
+#define kvm_vcpu_has_pmu(vcpu)				\
+	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+
 #endif /* __ARM64_KVM_HOST_H__ */
@@ -384,7 +384,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	bool overflow;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
+	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
 	overflow = !!kvm_pmu_overflow_status(vcpu);
@@ -825,9 +825,12 @@ bool kvm_arm_support_pmu_v3(void)
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->arch.pmu.created)
+	if (!kvm_vcpu_has_pmu(vcpu))
 		return 0;
 
+	if (!vcpu->arch.pmu.created)
+		return -EINVAL;
+
 	/*
 	 * A valid interrupt configuration for the PMU is either to have a
 	 * properly configured interrupt number and using an in-kernel
@@ -835,9 +838,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 	 */
 	if (irqchip_in_kernel(vcpu->kvm)) {
 		int irq = vcpu->arch.pmu.irq_num;
-		if (!kvm_arm_pmu_irq_initialized(vcpu))
-			return -EINVAL;
-
 		/*
 		 * If we are using an in-kernel vgic, at this point we know
 		 * the vgic will be initialized, so we can check the PMU irq
@@ -851,7 +851,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
 	}
 
 	kvm_pmu_vcpu_reset(vcpu);
-	vcpu->arch.pmu.ready = true;
 
 	return 0;
 }
@@ -913,8 +912,7 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 
 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 {
-	if (!kvm_arm_support_pmu_v3() ||
-	    !test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+	if (!kvm_vcpu_has_pmu(vcpu))
 		return -ENODEV;
 
 	if (vcpu->arch.pmu.created)
@@ -1015,7 +1013,7 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		if (!irqchip_in_kernel(vcpu->kvm))
 			return -EINVAL;
 
-		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+		if (!kvm_vcpu_has_pmu(vcpu))
 			return -ENODEV;
 
 		if (!kvm_arm_pmu_irq_initialized(vcpu))
@@ -1035,8 +1033,7 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	case KVM_ARM_VCPU_PMU_V3_IRQ:
 	case KVM_ARM_VCPU_PMU_V3_INIT:
 	case KVM_ARM_VCPU_PMU_V3_FILTER:
-		if (kvm_arm_support_pmu_v3() &&
-		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+		if (kvm_vcpu_has_pmu(vcpu))
 			return 0;
 	}
...
@@ -285,6 +285,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 			pstate = VCPU_RESET_PSTATE_EL1;
 		}
 
+		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
+			ret = -EINVAL;
+			goto out;
+		}
 		break;
 	}
...
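
With the kvm_reset_vcpu() hunk above, asking for the PMU feature fails cleanly at vcpu initialization when the host has no usable PMUv3. A hedged userspace sketch (not part of the diff; vcpu_init_with_pmu() is a made-up helper, and init->target is assumed to be filled in already, e.g. from KVM_ARM_PREFERRED_TARGET):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Set the PMUv3 feature bit and issue KVM_ARM_VCPU_INIT. With the
	 * reset-time check above, this fails with EINVAL on hosts without a
	 * PMUv3, rather than the error only surfacing in later PMU
	 * device-attribute setup.
	 */
	static int vcpu_init_with_pmu(int vcpu_fd, struct kvm_vcpu_init *init)
	{
		init->features[KVM_ARM_VCPU_PMU_V3 / 32] |=
			1U << (KVM_ARM_VCPU_PMU_V3 % 32);

		return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, init);
	}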
@@ -609,8 +609,9 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 {
 	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
-	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
+	bool enabled = kvm_vcpu_has_pmu(vcpu);
 
+	enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
 	if (!enabled)
 		kvm_inject_undefined(vcpu);
@@ -642,9 +643,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 val;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
@@ -671,9 +669,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (pmu_access_event_counter_el0_disabled(vcpu))
 		return false;
@@ -692,9 +687,6 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 pmceid;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	BUG_ON(p->is_write);
 
 	if (pmu_access_el0_disabled(vcpu))
@@ -725,10 +717,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 			      struct sys_reg_params *p,
 			      const struct sys_reg_desc *r)
 {
-	u64 idx;
-
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
+	u64 idx = ~0UL;
 
 	if (r->CRn == 9 && r->CRm == 13) {
 		if (r->Op2 == 2) {
@@ -744,8 +733,6 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 				return false;
 
 			idx = ARMV8_PMU_CYCLE_IDX;
-		} else {
-			return false;
 		}
 	} else if (r->CRn == 0 && r->CRm == 9) {
 		/* PMCCNTR */
@@ -759,10 +746,11 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 			return false;
 
 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
-	} else {
-		return false;
 	}
 
+	/* Catch any decoding mistake */
+	WARN_ON(idx == ~0UL);
+
 	if (!pmu_counter_idx_valid(vcpu, idx))
 		return false;
@@ -783,9 +771,6 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 idx, reg;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
@@ -823,9 +808,6 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 val, mask;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
@@ -854,13 +836,8 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
-	if (!vcpu_mode_priv(vcpu)) {
-		kvm_inject_undefined(vcpu);
+	if (check_pmu_access_disabled(vcpu, 0))
 		return false;
-	}
 
 	if (p->is_write) {
 		u64 val = p->regval & mask;
@@ -883,9 +860,6 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (pmu_access_el0_disabled(vcpu))
 		return false;
@@ -908,9 +882,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 {
 	u64 mask;
 
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p, r);
@@ -925,8 +896,10 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			     const struct sys_reg_desc *r)
 {
-	if (!kvm_arm_pmu_v3_ready(vcpu))
-		return trap_raz_wi(vcpu, p, r);
+	if (!kvm_vcpu_has_pmu(vcpu)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
 
 	if (p->is_write) {
 		if (!vcpu_mode_priv(vcpu)) {
@@ -1061,10 +1034,15 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
 			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
 	} else if (id == SYS_ID_AA64DFR0_EL1) {
+		u64 cap = 0;
+
 		/* Limit guests to PMUv3 for ARMv8.1 */
+		if (kvm_vcpu_has_pmu(vcpu))
+			cap = ID_AA64DFR0_PMUVER_8_1;
+
 		val = cpuid_feature_cap_perfmon_field(val,
 						      ID_AA64DFR0_PMUVER_SHIFT,
-						      ID_AA64DFR0_PMUVER_8_1);
+						      cap);
 	} else if (id == SYS_ID_DFR0_EL1) {
 		/* Limit guests to PMUv3 for ARMv8.1 */
 		val = cpuid_feature_cap_perfmon_field(val,
...
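
For reference, the ID_AA64DFR0_EL1 hunk above means a guest whose vcpu has no PMU reads the PMUVer field as 0 (not implemented), while vcpus with a PMU are still capped at PMUv3 for ARMv8.1. A rough standalone sketch of the field-capping idea (cap_perfmon_field() is illustrative only, not the kernel's cpuid_feature_cap_perfmon_field()):

	#include <stdint.h>

	/* Clamp a 4-bit ID register field at 'cap'; IMPLEMENTATION DEFINED
	 * (0xf) is treated as not implemented. The diff above passes cap = 0
	 * when the vcpu has no PMU and the ARMv8.1 PMUVer value otherwise.
	 */
	static uint64_t cap_perfmon_field(uint64_t reg, unsigned int shift,
					  uint64_t cap)
	{
		uint64_t field = (reg >> shift) & 0xf;

		if (field == 0xf)
			field = 0;

		if (field > cap)
			field = cap;

		return (reg & ~(0xfULL << shift)) | (field << shift);
	}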
@@ -24,13 +24,11 @@ struct kvm_pmu {
 	int irq_num;
 	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
 	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
-	bool ready;
 	bool created;
 	bool irq_level;
 	struct irq_work overflow_work;
 };
 
-#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
 #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
@@ -61,7 +59,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
 struct kvm_pmu {
 };
 
-#define kvm_arm_pmu_v3_ready(v)		(false)
 #define kvm_arm_pmu_irq_initialized(v)	(false)
 static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 					    u64 select_idx)
...