Commit b1ad37aa authored by Christoffer Dall, committed by Luis Henriques

arm/arm64: KVM: Require in-kernel vgic for the arch timers

commit 05971120 upstream.

It is currently possible to run a VM with architected timers support
without creating an in-kernel VGIC, which will result in interrupts from
the virtual timer going nowhere.

To address this issue, move the architected timers initialization to the
time when we run a VCPU for the first time, and then only initialize
(and enable) the architected timers if we have a properly created and
initialized in-kernel VGIC.
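
To illustrate the resulting flow, here is a minimal, self-contained sketch in C. The struct toy_kvm and the toy_* helpers are simplified stand-ins invented for this illustration, not the kernel's types or APIs; the real change is in the diff below.

/* Toy model: the arch timer is only enabled on first VCPU run, and only
 * when an in-kernel vgic exists and has been initialized. */
#include <stdbool.h>
#include <stdio.h>

struct toy_kvm {
	bool irqchip_in_kernel;   /* an in-kernel VGIC was created */
	bool vgic_initialized;    /* the VGIC init has completed */
	bool timer_enabled;       /* the arch timer may inject interrupts */
};

static int toy_vgic_init(struct toy_kvm *kvm)
{
	/* Stand-in: the real kvm_vgic_init() is a no-op without an in-kernel irqchip. */
	if (kvm->irqchip_in_kernel)
		kvm->vgic_initialized = true;
	return 0;
}

static void toy_timer_enable(struct toy_kvm *kvm)
{
	kvm->timer_enabled = true;
}

/* Mirrors the shape of kvm_vcpu_first_run_init() after this patch. */
static int toy_first_run_init(struct toy_kvm *kvm)
{
	int ret;

	if (!kvm->vgic_initialized) {
		ret = toy_vgic_init(kvm);
		if (ret)
			return ret;
	}

	/* Only enable the timer with a properly initialized in-kernel vgic. */
	if (kvm->irqchip_in_kernel && kvm->vgic_initialized)
		toy_timer_enable(kvm);

	return 0;
}

int main(void)
{
	struct toy_kvm with_vgic = { .irqchip_in_kernel = true };
	struct toy_kvm without_vgic = { 0 };

	toy_first_run_init(&with_vgic);
	toy_first_run_init(&without_vgic);

	printf("in-kernel vgic: timer %s\n", with_vgic.timer_enabled ? "enabled" : "disabled");
	printf("userspace gic:  timer %s\n", without_vgic.timer_enabled ? "enabled" : "disabled");
	return 0;
}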

When injecting interrupts from the virtual timer to the vgic, the
current setup should ensure that this never calls an on-demand init of
the VGIC, which is the only call path that could return an error from
kvm_vgic_inject_irq(), so capture the return value and raise a warning
if there's an error there.
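
As a rough standalone analogue of that capture-and-warn pattern (toy_inject_irq() and TOY_WARN_ON() are invented stand-ins; the real code uses kvm_vgic_inject_irq() and the kernel's WARN_ON(), as shown in the diff below):

#include <stdio.h>

/* Stand-in for kvm_vgic_inject_irq(); after this patch its only failure
 * path (on-demand vgic init) should never be reached from the timer. */
static int toy_inject_irq(int irq, int level)
{
	(void)irq;
	(void)level;
	return 0;  /* success is expected on every call */
}

/* Toy analogue of WARN_ON(): report the unexpected failure, don't abort. */
#define TOY_WARN_ON(cond) \
	do { if (cond) fprintf(stderr, "warning: %s at %s:%d\n", \
			       #cond, __FILE__, __LINE__); } while (0)

static void toy_timer_inject_irq(void)
{
	int ret;

	ret = toy_inject_irq(27, 1);  /* 27: a typical virtual timer PPI, illustrative only */
	TOY_WARN_ON(ret);
}

int main(void)
{
	toy_timer_inject_irq();
	return 0;
}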

We also change kvm_timer_init() from returning an int to being a void
function, since it always succeeds.
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
[ luis: backported to 3.16: adjusted context ]
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent 3376ce3c
@@ -442,6 +442,7 @@ static void update_vttbr(struct kvm *kvm)
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	int ret;
 
 	if (likely(vcpu->arch.has_run_once))
@@ -453,12 +454,20 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	/*
 	 * Initialize the VGIC before running a vcpu the first time on
 	 * this VM.
 	 */
-	if (unlikely(!vgic_initialized(vcpu->kvm))) {
-		ret = kvm_vgic_init(vcpu->kvm);
+	if (unlikely(!vgic_initialized(kvm))) {
+		ret = kvm_vgic_init(kvm);
 		if (ret)
 			return ret;
 	}
 
+	/*
+	 * Enable the arch timers only if we have an in-kernel VGIC
+	 * and it has been properly initialized, since we cannot handle
+	 * interrupts from the virtual timer with a userspace gic.
+	 */
+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+		kvm_timer_enable(kvm);
+
 	return 0;
 }
...

@@ -60,7 +60,8 @@ struct arch_timer_cpu {
 
 #ifdef CONFIG_KVM_ARM_TIMER
 int kvm_timer_hyp_init(void);
-int kvm_timer_init(struct kvm *kvm);
+void kvm_timer_enable(struct kvm *kvm);
+void kvm_timer_init(struct kvm *kvm);
 void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 			  const struct kvm_irq_level *irq);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
@@ -73,11 +74,8 @@ static inline int kvm_timer_hyp_init(void)
 	return 0;
 };
 
-static inline int kvm_timer_init(struct kvm *kvm)
-{
-	return 0;
-}
-
+static inline void kvm_timer_enable(struct kvm *kvm) {}
+static inline void kvm_timer_init(struct kvm *kvm) {}
 static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 					const struct kvm_irq_level *irq) {}
 static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
...

@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
 
 static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
 {
+	int ret;
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
 	timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
-	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-			    timer->irq->irq,
-			    timer->irq->level);
+	ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+				  timer->irq->irq,
+				  timer->irq->level);
+	WARN_ON(ret);
 }
 
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 	timer_disarm(timer);
 }
 
-int kvm_timer_init(struct kvm *kvm)
+void kvm_timer_enable(struct kvm *kvm)
 {
-	if (timecounter && wqueue) {
-		kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+	if (kvm->arch.timer.enabled)
+		return;
+
+	/*
+	 * There is a potential race here between VCPUs starting for the first
+	 * time, which may be enabling the timer multiple times.  That doesn't
+	 * hurt though, because we're just setting a variable to the same
+	 * variable that it already was.  The important thing is that all
+	 * VCPUs have the enabled variable set, before entering the guest, if
+	 * the arch timers are enabled.
+	 */
+	if (timecounter && wqueue)
 		kvm->arch.timer.enabled = 1;
-	}
+}
 
-	return 0;
+void kvm_timer_init(struct kvm *kvm)
+{
+	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }