Commit 6c62cc43 authored by Paolo Bonzini

Merge tag 'kvm-arm-fixes-for-v4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/ARM Fixes for v4.16, Round 1

Fix the interaction of userspace irqchip VMs with in-kernel irqchip VMs
and make sure we can build 32-bit KVM/ARM with gcc-8.
parents 722c2cd7 67870eb1
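The arch_timer changes below hinge on the kernel's jump-label machinery: a static key records, once at init time, whether the host GIC can track an active state for the virtual timer interrupt, so the vcpu load/put hot path pays a patched branch instead of a per-entry capability check. What follows is a minimal sketch of that pattern using the real jump-label API (DEFINE_STATIC_KEY_FALSE, static_branch_enable, static_branch_likely); the example_* names are illustrative and not part of this merge.

#include <linux/jump_label.h>

/* False by default; flipped at most once during initialization. */
static DEFINE_STATIC_KEY_FALSE(example_key);

/* Init path: runs once and patches every branch site in the hot path. */
static void example_init(bool capability_present)
{
	if (capability_present)
		static_branch_enable(&example_key);
}

/* Hot path: the chosen direction is a patched jump, not a load-and-test. */
static void example_hot_path(void)
{
	if (static_branch_likely(&example_key)) {
		/* capability path, e.g. forward the active state to the GIC */
	} else {
		/* fallback path, e.g. mask/unmask the percpu timer interrupt */
	}
}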
arch/arm/kvm/hyp/Makefile
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 KVM=../../../../virt/kvm
 
+CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
+
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
 obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
+
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
arch/arm/kvm/hyp/banked-sr.c
@@ -20,6 +20,10 @@
 #include <asm/kvm_hyp.h>
 
+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
 __asm__(".arch_extension virt");
 
 void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
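Some context on the gcc-8 build fix above: cc-option makes the Makefile probe the compiler, so CFLAGS_ARMV7VE expands to -march=armv7ve when the compiler accepts that flag and to nothing otherwise. On older toolchains the flag therefore never reaches the assembler, which is why banked-sr.c keeps the .arch_extension directive to enable the virtualization extension by hand. Below is a hedged sketch of the kind of accessor that needs it; the helper name is invented, but the banked-register mrs form is exactly what gas rejects without ARMv7ve support.

/* Without -march=armv7ve reaching gas, this directive enables the
 * virtualization extension for the whole translation unit. */
__asm__(".arch_extension virt");

/* Illustrative accessor: SP_usr is a banked register, and the mrs
 * banked-register form only assembles with the virt extension on. */
static inline unsigned long example_read_sp_usr(void)
{
	unsigned long val;

	asm volatile("mrs %0, SP_usr" : "=r" (val));
	return val;
}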
virt/kvm/arm/arch_timer.c
@@ -36,6 +36,8 @@ static struct timecounter *timecounter;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
+static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+
 static const struct kvm_irq_level default_ptimer_irq = {
 	.irq	= 30,
 	.level	= 1,
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void)
 	return timecounter->cc->read(timecounter->cc);
 }
 
+static inline bool userspace_irqchip(struct kvm *kvm)
+{
+	return static_branch_unlikely(&userspace_irqchip_in_use) &&
+		unlikely(!irqchip_in_kernel(kvm));
+}
+
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
 	hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
 	cancel_work_sync(work);
 }
 
-static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
-{
-	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
-	/*
-	 * When using a userspace irqchip with the architected timers, we must
-	 * prevent continuously exiting from the guest, and therefore mask the
-	 * physical interrupt by disabling it on the host interrupt controller
-	 * when the virtual level is high, such that the guest can make
-	 * forward progress. Once we detect the output level being
-	 * de-asserted, we unmask the interrupt again so that we exit from the
-	 * guest when the timer fires.
-	 */
-	if (vtimer->irq.level)
-		disable_percpu_irq(host_vtimer_irq);
-	else
-		enable_percpu_irq(host_vtimer_irq, 0);
-}
-
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	if (kvm_timer_should_fire(vtimer))
 		kvm_timer_update_irq(vcpu, true, vtimer);
 
-	if (static_branch_unlikely(&userspace_irqchip_in_use) &&
-	    unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_vtimer_update_mask_user(vcpu);
+	if (userspace_irqchip(vcpu->kvm) &&
+	    !static_branch_unlikely(&has_gic_active_state))
+		disable_percpu_irq(host_vtimer_irq);
 
 	return IRQ_HANDLED;
 }
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
 				   timer_ctx->irq.level);
 
-	if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
-	    likely(irqchip_in_kernel(vcpu->kvm))) {
+	if (!userspace_irqchip(vcpu->kvm)) {
 		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
 					  timer_ctx->irq.irq,
 					  timer_ctx->irq.level,
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
 	phys_timer_emulate(vcpu);
 }
 
-static void __timer_snapshot_state(struct arch_timer_context *timer)
-{
-	timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-	timer->cnt_cval = read_sysreg_el0(cntv_cval);
-}
-
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 	if (!vtimer->loaded)
 		goto out;
 
-	if (timer->enabled)
-		__timer_snapshot_state(vtimer);
+	if (timer->enabled) {
+		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+		vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+	}
 
 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff)
 	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
 }
 
-static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
+static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+{
+	int r;
+	r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
+	WARN_ON(r);
+}
+
+static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 	bool phys_active;
-	int ret;
 
-	phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
-
-	ret = irq_set_irqchip_state(host_vtimer_irq,
-				    IRQCHIP_STATE_ACTIVE,
-				    phys_active);
-	WARN_ON(ret);
+	if (irqchip_in_kernel(vcpu->kvm))
+		phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+	else
+		phys_active = vtimer->irq.level;
+	set_vtimer_irq_phys_active(vcpu, phys_active);
 }
 
-static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
-	kvm_vtimer_update_mask_user(vcpu);
+	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+	/*
+	 * When using a userspace irqchip with the architected timers and a
+	 * host interrupt controller that doesn't support an active state, we
+	 * must still prevent continuously exiting from the guest, and
+	 * therefore mask the physical interrupt by disabling it on the host
+	 * interrupt controller when the virtual level is high, such that the
+	 * guest can make forward progress. Once we detect the output level
+	 * being de-asserted, we unmask the interrupt again so that we exit
+	 * from the guest when the timer fires.
+	 */
+	if (vtimer->irq.level)
+		disable_percpu_irq(host_vtimer_irq);
+	else
+		enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-		kvm_timer_vcpu_load_user(vcpu);
+	if (static_branch_likely(&has_gic_active_state))
+		kvm_timer_vcpu_load_gic(vcpu);
 	else
-		kvm_timer_vcpu_load_vgic(vcpu);
+		kvm_timer_vcpu_load_nogic(vcpu);
 
 	set_cntvoff(vtimer->cntvoff);
@@ -555,18 +559,24 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-		__timer_snapshot_state(vtimer);
-		if (!kvm_timer_should_fire(vtimer)) {
-			kvm_timer_update_irq(vcpu, false, vtimer);
-			kvm_vtimer_update_mask_user(vcpu);
-		}
+	if (!kvm_timer_should_fire(vtimer)) {
+		kvm_timer_update_irq(vcpu, false, vtimer);
+		if (static_branch_likely(&has_gic_active_state))
+			set_vtimer_irq_phys_active(vcpu, false);
+		else
+			enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 	}
 }
 
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	unmask_vtimer_irq_user(vcpu);
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	if (unlikely(!timer->enabled))
+		return;
+
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+		unmask_vtimer_irq_user(vcpu);
 }
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -753,6 +763,8 @@ int kvm_timer_hyp_init(bool has_gic)
 			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
 			goto out_free_irq;
 		}
+
+		static_branch_enable(&has_gic_active_state);
 	}
 
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
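For the userspace-irqchip half of the fix, the contract being preserved is the existing UAPI: when no in-kernel GIC is present, KVM mirrors the timer output level into kvm_run after each KVM_RUN, and the VMM forwards level changes to its emulated interrupt controller. A minimal sketch under that assumption follows; device_irq_level and KVM_ARM_DEV_EL1_VTIMER are the real UAPI, while the handler and its caller are invented for illustration.

#include <stdbool.h>
#include <linux/kvm.h>

/* Called by the VMM after KVM_RUN returns: propagate the vtimer line
 * into the userspace-emulated irqchip when its level changes. */
static void example_sync_vtimer(struct kvm_run *run, bool *prev_level)
{
	bool level = run->s.regs.device_irq_level & KVM_ARM_DEV_EL1_VTIMER;

	if (level != *prev_level) {
		*prev_level = level;
		/* raise or lower the timer PPI in the emulated irqchip */
	}
}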