Commit 2b4825a8 authored by Marc Zyngier

KVM: arm64: timers: Use CNTPOFF_EL2 to offset the physical timer

With ECV and CNTPOFF_EL2, it is very easy to offer an offset for
the physical timer. So let's do just that.

Nothing can set the offset yet, so this should have no effect
whatsoever (famous last words...).
Reviewed-by: Colton Lewis <coltonlewis@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230330174800.2677007-5-maz@kernel.org
parent 32634994
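
Background for the diff below: FEAT_ECV adds CNTPOFF_EL2, a physical-counter offset register. When CNTHCTL_EL2.ECV is set, EL0/EL1 reads of the physical counter return the hardware count minus CNTPOFF_EL2, mirroring what CNTVOFF_EL2 has always done for the virtual counter. A minimal sketch of the relationship this patch relies on (illustrative pseudo-C; the host_cntpct/guest_* names are not from the patch):

	/* Illustrative only: guest-visible counters under VHE with ECV */
	guest_cntvct = host_cntpct - CNTVOFF_EL2;	/* existing behaviour */
	guest_cntpct = host_cntpct - CNTPOFF_EL2;	/* new with FEAT_ECV */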
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -52,6 +52,11 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
 				 struct arch_timer_context *timer,
 				 enum kvm_arch_timer_regs treg);
 
+static bool has_cntpoff(void)
+{
+	return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
 u32 timer_get_ctl(struct arch_timer_context *ctxt)
 {
 	struct kvm_vcpu *vcpu = ctxt->vcpu;
@@ -84,7 +89,7 @@ u64 timer_get_cval(struct arch_timer_context *ctxt)
 
 static u64 timer_get_offset(struct arch_timer_context *ctxt)
 {
-	if (ctxt->offset.vm_offset)
+	if (ctxt && ctxt->offset.vm_offset)
 		return *ctxt->offset.vm_offset;
 
 	return 0;
@@ -432,6 +437,12 @@ static void set_cntvoff(u64 cntvoff)
 	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
 }
 
+static void set_cntpoff(u64 cntpoff)
+{
+	if (has_cntpoff())
+		write_sysreg_s(cntpoff, SYS_CNTPOFF_EL2);
+}
+
 static void timer_save_state(struct arch_timer_context *ctx)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
@@ -480,6 +491,7 @@ static void timer_save_state(struct arch_timer_context *ctx)
 		write_sysreg_el0(0, SYS_CNTP_CTL);
 		isb();
 
+		set_cntpoff(0);
 		break;
 	case NR_KVM_TIMERS:
 		BUG();
@@ -550,6 +562,7 @@ static void timer_restore_state(struct arch_timer_context *ctx)
 		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
 		break;
 	case TIMER_PTIMER:
+		set_cntpoff(timer_get_offset(ctx));
 		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
 		isb();
 		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
@@ -767,6 +780,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 	vtimer->vcpu = vcpu;
 	vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset;
 	ptimer->vcpu = vcpu;
+	ptimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.poffset;
 
 	/* Synchronize cntvoff across all vtimers of a VM. */
 	timer_set_offset(vtimer, kvm_phys_timer_read());
@@ -1297,6 +1311,8 @@ void kvm_timer_init_vhe(void)
 	val = read_sysreg(cnthctl_el2);
 	val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
 	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
+	if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
+		val |= CNTHCTL_ECV;
 	write_sysreg(val, cnthctl_el2);
 }
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -47,7 +47,7 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
 		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
 		break;
 	case KVM_PTP_PHYS_COUNTER:
-		cycles = systime_snapshot.cycles;
+		cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.poffset;
 		break;
 	default:
 		return;
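
The kvm_ptp_get_time() change above keeps the PTP service coherent with the new offset: the counter value handed back to the guest must be in the guest's timebase, so the physical-counter case now subtracts the per-VM physical offset just as the virtual case subtracts voffset. Sketched invariant (illustrative only, not patch code):

	/* Illustrative only: guest-timebase cycles from a host snapshot */
	cycles = systime_snapshot.cycles - per_vm_offset;	/* voffset or poffset */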
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -21,6 +21,7 @@
 #define CNTHCTL_EVNTEN			(1 << 2)
 #define CNTHCTL_EVNTDIR			(1 << 3)
 #define CNTHCTL_EVNTI			(0xF << 4)
+#define CNTHCTL_ECV			(1 << 12)
 
 enum arch_timer_reg {
 	ARCH_TIMER_REG_CTRL,
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -34,6 +34,8 @@ struct arch_timer_offset {
 struct arch_timer_vm_data {
 	/* Offset applied to the virtual timer/counter */
 	u64	voffset;
+	/* Offset applied to the physical timer/counter */
+	u64	poffset;
 };
 
 struct arch_timer_context {