Commit 13b7756c authored by Marc Zyngier

arm/arm64: KVM: Stop propagating cacheability status of a faulted page

Now that we unconditionally flush newly mapped pages to the PoC,
there is no need to care about the "uncached" status of individual
pages - they must all be visible all the way down.
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 8f36ebaf
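
For reference, once the flag is gone the helper reduces to an unconditional clean to the PoC. The following is a simplified sketch reconstructed from the hunks below, not the verbatim kernel source: the icache maintenance that follows the flush is elided, and kvm_flush_dcache_to_poc() is assumed to be the existing helper that cleans the data cache to the Point of Coherency.

/*
 * Simplified sketch of the post-change helper (arm64 flavour).
 * Since the parent commit, the data cache is cleaned to the PoC
 * unconditionally, so the per-page "uncached" flag carries no
 * information and can be dropped.
 */
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
                                               kvm_pfn_t pfn,
                                               unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/* Always clean to the Point of Coherency. */
	kvm_flush_dcache_to_poc(va, size);

	/* icache maintenance elided from this sketch */
}

Callers then simply drop the extra argument, as the mmu.c hunks below show.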
@@ -129,8 +129,7 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 
 static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
                                                kvm_pfn_t pfn,
-                                               unsigned long size,
-                                               bool ipa_uncached)
+                                               unsigned long size)
 {
 	/*
 	 * If we are going to insert an instruction page and the icache is
...
@@ -1232,9 +1232,9 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 }
 
 static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
-                                      unsigned long size, bool uncached)
+                                      unsigned long size)
 {
-	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
+	__coherent_cache_guest_page(vcpu, pfn, size);
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1250,7 +1250,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct vm_area_struct *vma;
 	kvm_pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
-	bool fault_ipa_uncached;
 	bool logging_active = memslot_is_logging(memslot);
 	unsigned long flags = 0;
 
@@ -1337,8 +1336,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (!hugetlb && !force_pte)
 		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
 
-	fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
-
 	if (hugetlb) {
 		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
 		new_pmd = pmd_mkhuge(new_pmd);
@@ -1346,7 +1343,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			new_pmd = kvm_s2pmd_mkwrite(new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1356,7 +1353,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_pfn_dirty(pfn);
 			mark_page_dirty(kvm, gfn);
 		}
-		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
 	}
...
@@ -236,8 +236,7 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 
 static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
                                                kvm_pfn_t pfn,
-                                               unsigned long size,
-                                               bool ipa_uncached)
+                                               unsigned long size)
 {
 	void *va = page_address(pfn_to_page(pfn));
 
...