Commit deb151a5 authored by Marc Zyngier

Merge branch kvm-arm64/mmu/vmid-cleanups into kvmarm-master/next

* kvm-arm64/mmu/vmid-cleanups:
  : Cleanup the stage-2 configuration by providing a single helper,
  : and tidy up some of the ordering requirements for the VMID
  : allocator.
  KVM: arm64: Upgrade VMID accesses to {READ,WRITE}_ONCE
  KVM: arm64: Unify stage-2 programming behind __load_stage2()
  KVM: arm64: Move kern_hyp_va() usage in __load_guest_stage2() into the callers
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents ca3385a5 cf364e08
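
As context for the diff below, a minimal sketch of the calling convention this series settles on, distilled from the hunks that follow rather than added by the patch itself: __load_stage2() now takes the kvm_arch pointer and reads ->vtcr internally, so nVHE hyp callers translate the EL1 pointers with kern_hyp_va() themselves, while VHE callers pass them through directly.

	/* nVHE guest entry: hyp must translate the host-allocated pointers */
	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));

	/* VHE guest entry: pointers are already usable at EL2 */
	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);

	/* Protected-mode host stage-2 (nVHE), using hyp-private structures */
	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);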
@@ -252,6 +252,11 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 
 #define kvm_phys_to_vttbr(addr)	phys_to_ttbr(addr)
 
+/*
+ * When this is (directly or indirectly) used on the TLB invalidation
+ * path, we rely on a previously issued DSB so that page table updates
+ * and VMID reads are correctly ordered.
+ */
 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 {
 	struct kvm_vmid *vmid = &mmu->vmid;
@@ -259,7 +264,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
 
 	baddr = mmu->pgd_phys;
-	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
+	vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
 	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
 
@@ -267,9 +272,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
+static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
+					  struct kvm_arch *arch)
 {
-	write_sysreg(vtcr, vtcr_el2);
+	write_sysreg(arch->vtcr, vtcr_el2);
 	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
 	/*
@@ -280,11 +286,6 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
 	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
-{
-	__load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr);
-}
-
 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
 {
 	return container_of(mmu->arch, struct kvm, arch);
......
@@ -573,7 +573,7 @@ static void update_vmid(struct kvm_vmid *vmid)
 		kvm_call_hyp(__kvm_flush_vm_context);
 	}
 
-	vmid->vmid = kvm_next_vmid;
+	WRITE_ONCE(vmid->vmid, kvm_next_vmid);
 	kvm_next_vmid++;
 	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
......
@@ -60,7 +60,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 static __always_inline void __load_host_stage2(void)
 {
 	if (static_branch_likely(&kvm_protected_mode_initialized))
-		__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+		__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 	else
 		write_sysreg(0, vttbr_el2);
 }
......
@@ -112,8 +112,8 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
 	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
 	mmu->arch = &host_kvm.arch;
 	mmu->pgt = &host_kvm.pgt;
-	mmu->vmid.vmid_gen = 0;
-	mmu->vmid.vmid = 0;
+	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
+	WRITE_ONCE(mmu->vmid.vmid, 0);
 
 	return 0;
 }
@@ -129,7 +129,7 @@ int __pkvm_prot_finalize(void)
 	kvm_flush_dcache_to_poc(params, sizeof(*params));
 
 	write_sysreg(params->hcr_el2, hcr_el2);
-	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 
 	/*
 	 * Make sure to have an ISB before the TLB maintenance below but only
......
@@ -170,6 +170,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
+	struct kvm_s2_mmu *mmu;
 	bool pmu_switch_needed;
 	u64 exit_code;
 
@@ -213,7 +214,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_state_nvhe(guest_ctxt);
 
-	__load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
+	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
+	__load_stage2(mmu, kern_hyp_va(mmu->arch));
 	__activate_traps(vcpu);
 
 	__hyp_vgic_restore_state(vcpu);
......
@@ -34,12 +34,12 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	}
 
 	/*
-	 * __load_guest_stage2() includes an ISB only when the AT
+	 * __load_stage2() includes an ISB only when the AT
 	 * workaround is applied. Take care of the opposite condition,
 	 * ensuring that we always have an ISB, but not two ISBs back
 	 * to back.
 	 */
-	__load_guest_stage2(mmu);
+	__load_stage2(mmu, kern_hyp_va(mmu->arch));
 	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
......
@@ -124,11 +124,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	 *
 	 * We have already configured the guest's stage 1 translation in
 	 * kvm_vcpu_load_sysregs_vhe above. We must now call
-	 * __load_guest_stage2 before __activate_traps, because
-	 * __load_guest_stage2 configures stage 2 translation, and
+	 * __load_stage2 before __activate_traps, because
+	 * __load_stage2 configures stage 2 translation, and
 	 * __activate_traps clear HCR_EL2.TGE (among other things).
 	 */
-	__load_guest_stage2(vcpu->arch.hw_mmu);
+	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
 	__activate_traps(vcpu);
 
 	__kvm_adjust_pc(vcpu);
......
@@ -50,10 +50,10 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	 *
 	 * ARM erratum 1165522 requires some special handling (again),
 	 * as we need to make sure both stages of translation are in
-	 * place before clearing TGE. __load_guest_stage2() already
+	 * place before clearing TGE. __load_stage2() already
 	 * has an ISB in order to deal with this.
 	 */
-	__load_guest_stage2(mmu);
+	__load_stage2(mmu, mmu->arch);
 	val = read_sysreg(hcr_el2);
 	val &= ~HCR_TGE;
 	write_sysreg(val, hcr_el2);
......
@@ -532,7 +532,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
 	mmu->arch = &kvm->arch;
 	mmu->pgt = pgt;
 	mmu->pgd_phys = __pa(pgt->pgd);
-	mmu->vmid.vmid_gen = 0;
+	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
 	return 0;
 
 out_destroy_pgtable:
......