Commit 9800c24e authored by Paolo Bonzini


Merge tag 'kvmarm-fixes-5.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm fixes for 5.4, take #2

Special PMU edition:

- Fix cycle counter truncation
- Fix cycle counter overflow limit on pure 64bit system
- Allow chained events to be actually functional
- Correct sample period after overflow
parents 49dedf0d 8c3252c0
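
Note: the two period-related fixes below lean on two's-complement arithmetic. Programming a perf sample period of -counter makes the in-kernel event expire exactly when the architectural counter would overflow; counters that wrap at 32 bits need the period masked down accordingly. A minimal userspace sketch of that arithmetic (the helper name and standalone form are illustrative, not kernel API):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the overflow-period arithmetic used by the fixes below:
 * a sample period of -counter (two's complement) expires exactly when
 * the counter reaches its architectural overflow point. is_64bit
 * models the cycle counter with PMCR_EL0.LC set; any other counter
 * wraps at 32 bits, so the period is masked to 32 bits.
 */
static uint64_t overflow_period(uint64_t counter, int is_64bit)
{
	uint64_t period = -counter;	/* events left until overflow */

	if (!is_64bit)
		period &= 0xffffffffULL;	/* GENMASK(31, 0) */

	return period;
}

int main(void)
{
	/* 32-bit counter at 0xfffffff0: 16 events until it wraps. */
	printf("%" PRIu64 "\n", overflow_period(0xfffffff0ULL, 0));
	/* 64-bit cycle counter 100 cycles short of wrapping. */
	printf("%" PRIu64 "\n", overflow_period(-100ULL, 1));
	return 0;
}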
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -632,6 +632,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 */
 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+	if (!system_supports_32bit_el0())
+		val |= ARMV8_PMU_PMCR_LC;
 	__vcpu_sys_reg(vcpu, r->reg) = val;
 }
@@ -682,6 +684,8 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
+		if (!system_supports_32bit_el0())
+			val |= ARMV8_PMU_PMCR_LC;
 		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 		kvm_pmu_handle_pmcr(vcpu, val);
 		kvm_vcpu_pmu_restore_guest(vcpu);
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -8,6 +8,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_emulate.h>
 #include <kvm/arm_pmu.h>
@@ -146,8 +147,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	if (kvm_pmu_pmc_is_chained(pmc) &&
 	    kvm_pmu_idx_is_high_counter(select_idx))
 		counter = upper_32_bits(counter);
-
-	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
+	else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
 		counter = lower_32_bits(counter);
 
 	return counter;
@@ -193,7 +193,7 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
  */
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 {
-	u64 counter, reg;
+	u64 counter, reg, val;
 
 	pmc = kvm_pmu_get_canonical_pmc(pmc);
 	if (!pmc->perf_event)
@@ -201,16 +201,19 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
-	if (kvm_pmu_pmc_is_chained(pmc)) {
-		reg = PMEVCNTR0_EL0 + pmc->idx;
-		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
-		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+		reg = PMCCNTR_EL0;
+		val = counter;
 	} else {
-		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-		       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
-		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
+		reg = PMEVCNTR0_EL0 + pmc->idx;
+		val = lower_32_bits(counter);
 	}
 
+	__vcpu_sys_reg(vcpu, reg) = val;
+
+	if (kvm_pmu_pmc_is_chained(pmc))
+		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+
 	kvm_pmu_release_perf_event(pmc);
 }
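
For orientation in the rewritten kvm_pmu_stop_counter() above: a chained pair carries one logical 64-bit value split across two consecutive 32-bit event counters, while the cycle counter keeps all 64 bits in PMCCNTR_EL0. A hedged sketch of the split performed when a pair is saved back to the guest (regs[] stands in for the vcpu's PMEVCNTR<n>_EL0 copies; not kernel code):

#include <stdint.h>

/*
 * Store a 64-bit pair value into 32-bit per-counter registers: the
 * low half goes to the counter's own register and, when the counter
 * is chained, the high half goes to the next (odd) register.
 */
static void save_pair(uint32_t *regs, int idx, uint64_t counter, int chained)
{
	regs[idx] = (uint32_t)counter;			/* lower_32_bits() */
	if (chained)
		regs[idx + 1] = (uint32_t)(counter >> 32);	/* upper_32_bits() */
}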
@@ -440,8 +443,25 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 				  struct pt_regs *regs)
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 	int idx = pmc->idx;
+	u64 period;
+
+	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+	/*
+	 * Reset the sample period to the architectural limit,
+	 * i.e. the point where the counter overflows.
+	 */
+	period = -(local64_read(&perf_event->count));
+
+	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+		period &= GENMASK(31, 0);
+
+	local64_set(&perf_event->hw.period_left, 0);
+	perf_event->attr.sample_period = period;
+	perf_event->hw.sample_period = period;
 
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
@@ -449,6 +469,8 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 		kvm_vcpu_kick(vcpu);
 	}
+
+	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
 }
 
 /**
@@ -567,12 +589,12 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 		 * high counter.
 		 */
 		attr.sample_period = (-counter) & GENMASK(63, 0);
+		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
+			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+
 		event = perf_event_create_kernel_counter(&attr, -1, current,
 							 kvm_pmu_perf_overflow,
 							 pmc + 1);
-
-		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
 	} else {
 		/* The initial sample period (overflow count) of an event. */
 		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
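
The reordering in the last hunk is the substance of the chained-events fix: perf_event_create_kernel_counter() snapshots the attr structure when the event is created, so setting PERF_ATTR_CFG1_KVM_PMU_CHAINED after the call could never reach the already-created event. A minimal sketch of that copy-at-creation pitfall (struct and function names here are hypothetical, not the perf API):

#include <assert.h>

struct fake_attr { unsigned long config1; };
struct fake_event { struct fake_attr attr; };

/* Like perf event creation: snapshots *a, so later writes to *a are lost. */
static void fake_create(struct fake_event *e, const struct fake_attr *a)
{
	e->attr = *a;
}

int main(void)
{
	struct fake_attr a = { .config1 = 0 };
	struct fake_event e;

	fake_create(&e, &a);
	a.config1 |= 1UL;	/* too late: the event never sees this */
	assert(e.attr.config1 == 0);
	return 0;
}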