Commit cfb1a98d authored by Quentin Perret, committed by Marc Zyngier

KVM: arm64: Use kvm_arch in kvm_s2_mmu

In order to make use of the stage 2 pgtable code for the host stage 2,
change kvm_s2_mmu to use a kvm_arch pointer in lieu of the kvm pointer,
as the host will have the former but not the latter.
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-21-qperret@google.com
parent 834cd93d
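
The change hinges on the container_of() pattern visible in the kvm_mmu.h hunk below: struct kvm embeds its struct kvm_arch as the member named arch, so a pointer to that member can be walked back to the enclosing struct kvm when one exists, while the host stage 2 can carry a bare kvm_arch with no struct kvm around it. The snippet below is a minimal userspace sketch of that round trip; the struct layouts, the local container_of definition and the arch_to_kvm() helper name are simplified illustrations, not the kernel's actual definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; not the real layouts. */
struct kvm_arch {
	unsigned long vtcr;
};

struct kvm {
	int id;
	struct kvm_arch arch;	/* embedded by value, as in the kernel's struct kvm */
};

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Same shape as the kvm_s2_mmu_to_kvm() helper added by this patch. */
static struct kvm *arch_to_kvm(struct kvm_arch *arch)
{
	return container_of(arch, struct kvm, arch);
}

int main(void)
{
	struct kvm vm = { .id = 42 };

	/* Recover the enclosing struct kvm from the embedded kvm_arch. */
	printf("recovered id = %d\n", arch_to_kvm(&vm.arch)->id);
	return 0;
}

Only MMUs whose kvm_arch is embedded in a struct kvm may take this round trip; the guest paths in mmu.c below do so via the new kvm_s2_mmu_to_kvm() helper, whereas __load_guest_stage2() only needs the VTCR value and can dereference mmu->arch directly.
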
arch/arm64/include/asm/kvm_host.h
@@ -94,7 +94,7 @@ struct kvm_s2_mmu {
 	/* The last vcpu id that ran on each physical CPU */
 	int __percpu *last_vcpu_ran;
 
-	struct kvm *kvm;
+	struct kvm_arch *arch;
 };
 
 struct kvm_arch_memory_slot {
arch/arm64/include/asm/kvm_mmu.h
@@ -272,7 +272,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  */
 static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
 {
-	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
+	write_sysreg(kern_hyp_va(mmu->arch)->vtcr, vtcr_el2);
 	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
 	/*
@@ -283,5 +283,9 @@ static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
 	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
+static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
+{
+	return container_of(mmu->arch, struct kvm, arch);
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
arch/arm64/kvm/mmu.c
@@ -165,7 +165,7 @@ static void *kvm_host_va(phys_addr_t phys)
 static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
 				 bool may_block)
 {
-	struct kvm *kvm = mmu->kvm;
+	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
 	phys_addr_t end = start + size;
 
 	assert_spin_locked(&kvm->mmu_lock);
@@ -470,7 +470,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
 	for_each_possible_cpu(cpu)
 		*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
 
-	mmu->kvm = kvm;
+	mmu->arch = &kvm->arch;
 	mmu->pgt = pgt;
 	mmu->pgd_phys = __pa(pgt->pgd);
 	mmu->vmid.vmid_gen = 0;
@@ -552,7 +552,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
 {
-	struct kvm *kvm = mmu->kvm;
+	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
 	struct kvm_pgtable *pgt = NULL;
 
 	spin_lock(&kvm->mmu_lock);
@@ -621,7 +621,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
  */
 static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
 {
-	struct kvm *kvm = mmu->kvm;
+	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
 
 	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
 }