Commit 42773357 authored by Reiji Watanabe, committed by Oliver Upton

KVM: arm64: Select default PMU in KVM_ARM_VCPU_INIT handler

Future changes to KVM's sysreg emulation will rely on having a valid PMU
instance to determine the number of implemented counters (PMCR_EL0.N).
This is earlier than when userspace is expected to modify the vPMU
device attributes, where the default is selected today.

Select the default PMU when handling KVM_ARM_VCPU_INIT such that it is
available in time for sysreg emulation.

Reviewed-by: Sebastian Ott <sebott@redhat.com>
Co-developed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Link: https://lore.kernel.org/r/20231020214053.2144305-3-rananta@google.com
[Oliver: rewrite changelog]
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 1616ca6f
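
As context for the diff below, here is a minimal userspace sketch (not part of this patch): after this change, requesting the KVM_ARM_VCPU_PMU_V3 feature at KVM_ARM_VCPU_INIT time is enough for KVM to bind the default arm_pmu to the VM, without first touching the KVM_ARM_VCPU_PMU_V3_CTRL attribute group. Fd setup and error handling are elided, and the helper name is illustrative.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Illustrative helper: initialize a vCPU with the vPMU feature requested. */
static int init_vcpu_with_pmu(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init = { 0 };

	/* Let KVM fill in the preferred target for this host. */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init))
		return -1;

	/* Request the vPMU; with this patch the default PMU is selected here. */
	init.features[0] |= 1U << KVM_ARM_VCPU_PMU_V3;

	/* KVM_ARM_VCPU_INIT now reaches kvm_setup_vcpu() -> kvm_arm_set_default_pmu(). */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}
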
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1229,6 +1229,21 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
 	return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
 }
 
+static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	int ret = 0;
+
+	/*
+	 * When the vCPU has a PMU, but no PMU is set for the guest
+	 * yet, set the default one.
+	 */
+	if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
+		ret = kvm_arm_set_default_pmu(kvm);
+
+	return ret;
+}
+
 static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 				 const struct kvm_vcpu_init *init)
 {
@@ -1244,6 +1259,10 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 
 	bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
 
+	ret = kvm_setup_vcpu(vcpu);
+	if (ret)
+		goto out_unlock;
+
 	/* Now we know what it is, we can reset it. */
 	ret = kvm_reset_vcpu(vcpu);
 	if (ret) {
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -717,10 +717,9 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 	 * It is still necessary to get a valid cpu, though, to probe for the
 	 * default PMU instance as userspace is not required to specify a PMU
 	 * type. In order to uphold the preexisting behavior KVM selects the
-	 * PMU instance for the core where the first call to the
-	 * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case
-	 * would be a user with disdain of all things big.LITTLE that affines
-	 * the VMM to a particular cluster of cores.
+	 * PMU instance for the core during vcpu init. A dependent use
+	 * case would be a user with disdain of all things big.LITTLE that
+	 * affines the VMM to a particular cluster of cores.
 	 *
 	 * In any case, userspace should just do the sane thing and use the UAPI
 	 * to select a PMU type directly. But, be wary of the baggage being
@@ -893,7 +892,7 @@ static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
  * where vCPUs can be scheduled on any core but the guest
  * counters could stop working.
  */
-static int kvm_arm_set_default_pmu(struct kvm *kvm)
+int kvm_arm_set_default_pmu(struct kvm *kvm)
 {
 	struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
@@ -946,13 +945,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	if (vcpu->arch.pmu.created)
 		return -EBUSY;
 
-	if (!kvm->arch.arm_pmu) {
-		int ret = kvm_arm_set_default_pmu(kvm);
-
-		if (ret)
-			return ret;
-	}
-
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
 		int __user *uaddr = (int __user *)(long)attr->addr;
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -101,6 +101,7 @@ void kvm_vcpu_pmu_resync_el0(void);
 })
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);
+int kvm_arm_set_default_pmu(struct kvm *kvm);
 
 #else
 struct kvm_pmu {
@@ -174,6 +175,11 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
 }
 static inline void kvm_vcpu_pmu_resync_el0(void) {}
 
+static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+	return -ENODEV;
+}
+
 #endif
 
 #endif