Commit 378e6a9c authored by Yanan Wang, committed by Marc Zyngier

KVM: arm64: Tweak parameters of guest cache maintenance functions

Adjust the parameter "kvm_pfn_t pfn" of __clean_dcache_guest_page
and __invalidate_icache_guest_page to "void *va", which paves the
way for converting these two guest CMO functions into callbacks in
struct kvm_pgtable_mm_ops. No functional change.
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210617105824.31752-4-wangyanan55@huawei.com
parent a4d5ca5c
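Why "void *va" rather than a pfn: a callback stored in struct kvm_pgtable_mm_ops must be callable from contexts (such as the nVHE hyp code) that work on virtual addresses and may not have a struct page to hand, so the pfn-to-VA translation is pushed out to the callers. As a rough illustration of the callback shape this change enables, the sketch below uses invented member names; the actual kvm_pgtable_mm_ops extension lands in a later patch of this series, not in this commit:

/*
 * Illustrative sketch only. The member names below are assumptions
 * made up for this example; the real callbacks are added later.
 */
struct kvm_pgtable_mm_ops_sketch {
	/* Clean guest data-cache lines for a range to the point of coherency. */
	void (*dcache_clean_guest_page)(void *va, size_t size);
	/* Invalidate the instruction cache over a range of guest memory. */
	void (*icache_inval_guest_page)(void *va, size_t size);
};

With void */size_t parameters, the same function pointer works whether the range was derived from a pfn, a hyp VA, or any other mapping the caller owns.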
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -187,10 +187,8 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
+static inline void __clean_dcache_guest_page(void *va, size_t size)
 {
-	void *va = page_address(pfn_to_page(pfn));
-
 	/*
 	 * With FWB, we ensure that the guest always accesses memory using
 	 * cacheable attributes, and we don't have to clean to PoC when
@@ -203,16 +201,13 @@ static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
 	kvm_flush_dcache_to_poc(va, size);
 }
 
-static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
-						  unsigned long size)
+static inline void __invalidate_icache_guest_page(void *va, size_t size)
 {
 	if (icache_is_aliasing()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
 		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
-		void *va = page_address(pfn_to_page(pfn));
-
 		invalidate_icache_range((unsigned long)va,
 					(unsigned long)va + size);
 	}
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -126,6 +126,16 @@ static void *kvm_host_va(phys_addr_t phys)
 	return __va(phys);
 }
 
+static void clean_dcache_guest_page(void *va, size_t size)
+{
+	__clean_dcache_guest_page(va, size);
+}
+
+static void invalidate_icache_guest_page(void *va, size_t size)
+{
+	__invalidate_icache_guest_page(va, size);
+}
+
 /*
  * Unmapping vs dcache management:
  *
@@ -693,16 +703,6 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
-	__clean_dcache_guest_page(pfn, size);
-}
-
-static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
-	__invalidate_icache_guest_page(pfn, size);
-}
-
 static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
 {
 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
@@ -972,11 +972,13 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		prot |= KVM_PGTABLE_PROT_W;
 
 	if (fault_status != FSC_PERM && !device)
-		clean_dcache_guest_page(pfn, vma_pagesize);
+		clean_dcache_guest_page(page_address(pfn_to_page(pfn)),
+					vma_pagesize);
 
 	if (exec_fault) {
 		prot |= KVM_PGTABLE_PROT_X;
-		invalidate_icache_guest_page(pfn, vma_pagesize);
+		invalidate_icache_guest_page(page_address(pfn_to_page(pfn)),
+					     vma_pagesize);
 	}
 
 	if (device)
@@ -1178,7 +1180,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	 * We've moved a page around, probably through CoW, so let's treat it
 	 * just like a translation fault and clean the cache to the PoC.
 	 */
-	clean_dcache_guest_page(pfn, PAGE_SIZE);
+	clean_dcache_guest_page(page_address(pfn_to_page(pfn)), PAGE_SIZE);
 
 	/*
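Caller-side pattern: with the pfn parameter gone, each caller derives the VA itself via page_address(pfn_to_page(pfn)), as the user_mem_abort() and kvm_set_spte_gfn() hunks above show. A minimal sketch of the resulting convention (the helper name example_fault_path_cmo is hypothetical, shown only to illustrate the pattern):

/* Hypothetical helper illustrating the post-patch calling convention. */
static void example_fault_path_cmo(kvm_pfn_t pfn, size_t size, bool exec_fault)
{
	/* Translate the pfn to a linear-map VA once, at the call site... */
	void *va = page_address(pfn_to_page(pfn));

	/* ...then hand the VA to the helpers, which no longer see pfns. */
	clean_dcache_guest_page(va, size);
	if (exec_fault)
		invalidate_icache_guest_page(va, size);
}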