Commit ac30a11e authored by Marc Zyngier

ARM: KVM: introduce per-vcpu HYP Configuration Register

So far, KVM/ARM used a fixed HCR configuration per guest, except for
the VI/VF/VA bits, which are used to inject virtual interrupts in the
absence of a VGIC.

With the upcoming need to dynamically reconfigure trapping, it becomes
necessary to allow the HCR to be changed on a per-vcpu basis.

The fix here is to mimic what KVM/arm64 already does: a per-vcpu HCR
field, initialized at setup time.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
parent 547f7813
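
To make the motivation concrete, here is a C sketch of the kind of runtime
retuning a per-vcpu field enables. The helper below is hypothetical and not
part of this commit; it only assumes the new arch.hcr field and the
HCR_TWI/HCR_TWE bits already defined in kvm_arm.h (first hunk below).

/*
 * Hypothetical helper, not part of this commit: with a per-vcpu
 * copy of the HCR, trap bits can be retuned at runtime instead of
 * living in a fixed global mask.
 */
static void kvm_vcpu_set_wfx_traps(struct kvm_vcpu *vcpu, bool trap)
{
	if (trap)
		vcpu->arch.hcr |= HCR_TWI | HCR_TWE;	/* trap WFI/WFE to HYP */
	else
		vcpu->arch.hcr &= ~(HCR_TWI | HCR_TWE);	/* run WFI/WFE natively */
	/* The new value takes effect at the next world switch (last hunk below). */
}
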
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -69,7 +69,6 @@
 #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
 			HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
 			HCR_TWE | HCR_SWIO | HCR_TIDCP)
-#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
 
 /* System Control Register (SCTLR) bits */
 #define SCTLR_TE	(1 << 30)

--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -101,6 +101,12 @@ struct kvm_vcpu_arch {
 	/* The CPU type we expose to the VM */
 	u32 midr;
 
+	/* HYP trapping configuration */
+	u32 hcr;
+
+	/* Interrupt related fields */
+	u32 irq_lines;		/* IRQ and FIQ levels */
+
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
@@ -128,9 +134,6 @@ struct kvm_vcpu_arch {
 	/* IO related fields */
 	struct kvm_decode mmio_decode;
 
-	/* Interrupt related fields */
-	u32 irq_lines;		/* IRQ and FIQ levels */
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;

--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -174,6 +174,7 @@ int main(void)
   DEFINE(VCPU_FIQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
   DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
   DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
+  DEFINE(VCPU_HCR,		offsetof(struct kvm_vcpu, arch.hcr));
   DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines));
   DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.fault.hsr));
   DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));

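The new DEFINE line exists so the HYP assembly can address arch.hcr by an
immediate offset. As a reminder of the mechanism, a simplified sketch of the
asm-offsets pattern follows (the kernel's real DEFINE lives in
include/linux/kbuild.h; the toy struct and exact macro body here are
illustrative only): the compiler evaluates offsetof() at build time and
plants the result in its assembly output, which a build script rewrites into
assembler #defines such as VCPU_HCR.

/* Simplified sketch of the asm-offsets pattern, using a toy struct. */
#include <stddef.h>

struct toy_vcpu_arch { unsigned int midr; unsigned int hcr; };
struct toy_vcpu { struct toy_vcpu_arch arch; };

/* Emit "->SYM <value>" into the compiler's -S output; the build then
 * turns each such line into "#define SYM <value>" for assembly use. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))

int main(void)
{
	DEFINE(VCPU_HCR, offsetof(struct toy_vcpu, arch.hcr));	/* 4 on common ABIs */
	return 0;
}
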
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -38,6 +38,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.hcr = HCR_GUEST_MASK;
 	return 0;
 }

--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -597,17 +597,14 @@ vcpu	.req	r0		@ vcpu pointer always in r0
 
 /* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
 .macro configure_hyp_role operation
-	mrc	p15, 4, r2, c1, c1, 0	@ HCR
-	bic	r2, r2, #HCR_VIRT_EXCP_MASK
-	ldr	r3, =HCR_GUEST_MASK
 	.if \operation == vmentry
-	orr	r2, r2, r3
+	ldr	r2, [vcpu, #VCPU_HCR]
 	ldr	r3, [vcpu, #VCPU_IRQ_LINES]
 	orr	r2, r2, r3
 	.else
-	bic	r2, r2, r3
+	mov	r2, #0
 	.endif
-	mcr	p15, 4, r2, c1, c1, 0
+	mcr	p15, 4, r2, c1, c1, 0	@ HCR
 .endm
 
 .macro load_vcpu

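For readers who don't speak ARM assembly, a C-level mirror of the rewritten
macro (illustration only; write_hcr() is a hypothetical stand-in for the
mcr p15, 4, rX, c1, c1, 0 coprocessor write):

/* C-level mirror of configure_hyp_role -- illustration only. */
static void configure_hyp_role(struct kvm_vcpu *vcpu, bool vmentry)
{
	u32 hcr;

	if (vmentry)
		/* Per-vcpu traps, plus any pending virtual IRQ/FIQ lines. */
		hcr = vcpu->arch.hcr | vcpu->arch.irq_lines;
	else
		/* Back to the host: stage-2 translation and all traps off. */
		hcr = 0;

	write_hcr(hcr);		/* hypothetical: mcr p15, 4, hcr, c1, c1, 0 */
}
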