Commit 2d0e63e0 authored by Christoffer Dall, committed by Marc Zyngier

KVM: arm/arm64: Avoid VGICv3 save/restore on VHE with no IRQs

We can finally get rid of all calls to the VGICv3 save/restore
functions when the AP lists are empty on VHE systems.  This requires
carefully factoring the trap configuration out of the state
save/restore code, and carefully choosing what to do on the VHE and
non-VHE paths.

One of the challenges is that we cannot save/restore the VMCR lazily:
when emulating a GICv2 on a GICv3, we can only write the VMCR while
ICC_SRE_EL1.SRE is cleared, since otherwise all Group-0 interrupts end
up being delivered as FIQs.
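
Condensed from __vgic_v3_activate_traps() in the hunks below (a sketch
of the required ordering only, not the complete function):

	if (!cpu_if->vgic_sre) {
		/* Force SRE off for the GICv2-on-GICv3 case... */
		write_gicreg(0, ICC_SRE_EL1);
		/* ...and make sure that write has taken effect... */
		isb();
		/* ...before it is safe to write the VMCR. */
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
	}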

To solve this problem, while still keeping the fast path of exiting a
VM with no interrupts pending fast (which also reduces the latency of
actually delivering virtual interrupts that come from physical
interrupts), we orchestrate a dance: VHE systems (which can run in the
host with ICC_SRE_EL1.SRE cleared) only activate/deactivate the traps
in vgic load/put, while non-VHE systems reconfigure the traps on every
round-trip, as sketched below.
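
In brief, the resulting split looks like this (both excerpts are taken
from the hunks below; the put/save paths mirror them with
__vgic_v3_deactivate_traps()):

	/* VHE: traps are configured once per vcpu load/put */
	void vgic_v3_load(struct kvm_vcpu *vcpu)
	{
		...
		if (has_vhe())
			__vgic_v3_activate_traps(vcpu);
	}

	/* non-VHE: traps are reconfigured on every world switch */
	static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
	{
		if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
			__vgic_v3_activate_traps(vcpu);
			__vgic_v3_restore_state(vcpu);
		}
	}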
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 923a2e30
@@ -110,6 +110,8 @@ void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
 void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
+void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
 void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
 void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
 
@@ -90,14 +90,18 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
 
 static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
 {
-	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
 		__vgic_v3_save_state(vcpu);
+		__vgic_v3_deactivate_traps(vcpu);
+	}
 }
 
 static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 {
-	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
+		__vgic_v3_activate_traps(vcpu);
 		__vgic_v3_restore_state(vcpu);
+	}
 }
 
 static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
@@ -124,6 +124,8 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
 void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
+void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
 void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
 void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
@@ -195,15 +195,19 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
 /* Save VGICv3 state on non-VHE systems */
 static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 {
-	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
 		__vgic_v3_save_state(vcpu);
+		__vgic_v3_deactivate_traps(vcpu);
+	}
 }
 
 /* Restore VGICv3 state on non-VHE systems */
 static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 {
-	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
+		__vgic_v3_activate_traps(vcpu);
 		__vgic_v3_restore_state(vcpu);
+	}
 }
 
 static bool __hyp_text __true_value(void)
@@ -209,15 +209,15 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
-	u64 val;
 
 	/*
 	 * Make sure stores to the GIC via the memory mapped interface
-	 * are now visible to the system register interface.
+	 * are now visible to the system register interface when reading the
+	 * LRs, and when reading back the VMCR on non-VHE systems.
 	 */
-	if (!cpu_if->vgic_sre) {
-		dsb(st);
-		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
-	}
+	if (used_lrs || !has_vhe()) {
+		if (!cpu_if->vgic_sre)
+			dsb(st);
+	}
 
 	if (used_lrs) {

@@ -226,7 +226,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 
 		elrsr = read_gicreg(ICH_ELSR_EL2);
 
-		write_gicreg(0, ICH_HCR_EL2);
+		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);
 
 		for (i = 0; i < used_lrs; i++) {
 			if (elrsr & (1 << i))

@@ -236,19 +236,6 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 			__gic_v3_set_lr(0, i);
 		}
-	} else {
-		if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
-		    cpu_if->its_vpe.its_vm)
-			write_gicreg(0, ICH_HCR_EL2);
-	}
-
-	val = read_gicreg(ICC_SRE_EL2);
-	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
-
-	if (!cpu_if->vgic_sre) {
-		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
-		isb();
-		write_gicreg(1, ICC_SRE_EL1);
-	}
+	}
 }
@@ -258,6 +245,31 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
 	int i;
 
+	if (used_lrs) {
+		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+
+		for (i = 0; i < used_lrs; i++)
+			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
+	}
+
+	/*
+	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
+	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
+	 * reached the (re)distributors. This ensures the guest will read the
+	 * correct values from the memory-mapped interface.
+	 */
+	if (used_lrs || !has_vhe()) {
+		if (!cpu_if->vgic_sre) {
+			isb();
+			dsb(sy);
+		}
+	}
+}
+
+void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
 	/*
 	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
 	 * Group0 interrupt (as generated in GICv2 mode) to be

@@ -265,47 +277,69 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	 * consequences. So we must make sure that ICC_SRE_EL1 has
 	 * been actually programmed with the value we want before
 	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
-	 * particular.
+	 * particular.  This logic must be called before
+	 * __vgic_v3_restore_state().
 	 */
 	if (!cpu_if->vgic_sre) {
 		write_gicreg(0, ICC_SRE_EL1);
 		isb();
 		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
-	}
 
-	if (used_lrs) {
-		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
-
-		for (i = 0; i < used_lrs; i++)
-			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
-	} else {
-		/*
-		 * If we need to trap system registers, we must write
-		 * ICH_HCR_EL2 anyway, even if no interrupts are being
-		 * injected. Same thing if GICv4 is used, as VLPI
-		 * delivery is gated by ICH_HCR_EL2.En.
-		 */
-		if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
-		    cpu_if->its_vpe.its_vm)
-			write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+		if (has_vhe()) {
+			/*
+			 * Ensure that the write to the VMCR will have reached
+			 * the (re)distributors. This ensures the guest will
+			 * read the correct values from the memory-mapped
+			 * interface.
+			 */
+			isb();
+			dsb(sy);
+		}
 	}
 
 	/*
-	 * Ensures that the above will have reached the
-	 * (re)distributors. This ensures the guest will read the
-	 * correct values from the memory-mapped interface.
-	 */
-	if (!cpu_if->vgic_sre) {
-		isb();
-		dsb(sy);
-	}
-
-	/*
 	 * Prevent the guest from touching the GIC system registers if
 	 * SRE isn't enabled for GICv3 emulation.
 	 */
 	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
 		     ICC_SRE_EL2);
+
+	/*
+	 * If we need to trap system registers, we must write
+	 * ICH_HCR_EL2 anyway, even if no interrupts are being
+	 * injected,
+	 */
+	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+	    cpu_if->its_vpe.its_vm)
+		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+}
+
+void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
+{
+	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+	u64 val;
+
+	if (!cpu_if->vgic_sre) {
+		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
+	}
+
+	val = read_gicreg(ICC_SRE_EL2);
+	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
+
+	if (!cpu_if->vgic_sre) {
+		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+		isb();
+		write_gicreg(1, ICC_SRE_EL1);
+	}
+
+	/*
+	 * If we were trapping system registers, we enabled the VGIC even if
+	 * no interrupts were being injected, and we disable it again here.
+	 */
+	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+	    cpu_if->its_vpe.its_vm)
+		write_gicreg(0, ICH_HCR_EL2);
 }
 
 void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
@@ -590,6 +590,9 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 	kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
 
 	kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);
+
+	if (has_vhe())
+		__vgic_v3_activate_traps(vcpu);
 }
 
 void vgic_v3_put(struct kvm_vcpu *vcpu)
@@ -600,4 +603,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
 	cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
 
 	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
+
+	if (has_vhe())
+		__vgic_v3_deactivate_traps(vcpu);
 }
@@ -773,15 +773,15 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
-	if (can_access_vgic_from_kernel())
-		vgic_save_state(vcpu);
-
 	WARN_ON(vgic_v4_sync_hwstate(vcpu));
 
 	/* An empty ap_list_head implies used_lrs == 0 */
 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
 		return;
 
+	if (can_access_vgic_from_kernel())
+		vgic_save_state(vcpu);
+
 	if (vgic_cpu->used_lrs)
 		vgic_fold_lr_state(vcpu);
 
 	vgic_prune_ap_list(vcpu);
@@ -810,7 +810,7 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * this.
 	 */
 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
-		goto out;
+		return;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
@@ -818,7 +818,6 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	vgic_flush_lr_state(vcpu);
 	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
 
-out:
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
 }