Commit da9554e0 authored by Christophe Leroy, committed by Michael Ellerman

powerpc: Refactor update_mmu_cache_range()

On nohash, this function is a no-op except for E500 with hugepages.

On book3s, this function is for hash MMUs only.

Combine those tests and rename the E500 update_mmu_cache_range() to
__update_mmu_cache(), which gets called by update_mmu_cache_range().
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/b029842cb6783cbeb43d202e69a90341d65295a4.1695659959.git.christophe.leroy@csgroup.eu
parent 93f81f6e
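In effect, the two per-subarch definitions (the book3s header's hash-only inline and the nohash header's E500/hugetlb-only variant) are replaced by a single wrapper in the common powerpc pgtable.h. A condensed view of that new wrapper follows, with explanatory comments added here for illustration (the comments are not part of the commit):

void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/* book3s: only hash MMUs (not radix) preload an HPTE here */
	/* nohash: only E500 with hugetlb pages preloads a TLB entry here */
	if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
	    (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
		__update_mmu_cache(vma, address, ptep);
}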
@@ -8,28 +8,4 @@
 #include <asm/book3s/32/pgtable.h>
 #endif
 
-#ifndef __ASSEMBLY__
-void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-static inline void update_mmu_cache_range(struct vm_fault *vmf,
-		struct vm_area_struct *vma, unsigned long address,
-		pte_t *ptep, unsigned int nr)
-{
-	if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
-		return;
-	if (radix_enabled())
-		return;
-	__update_mmu_cache(vma, address, ptep);
-}
-
-#endif /* __ASSEMBLY__ */
 #endif
@@ -259,20 +259,5 @@ static inline int pud_huge(pud_t pud)
 #define is_hugepd(hpd) (hugepd_ok(hpd))
 #endif
 
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- */
-#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE)
-void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
-		unsigned long address, pte_t *ptep, unsigned int nr);
-#else
-static inline void update_mmu_cache_range(struct vm_fault *vmf,
-		struct vm_area_struct *vma, unsigned long address,
-		pte_t *ptep, unsigned int nr) {}
-#endif
-
 #endif /* __ASSEMBLY__ */
 #endif
@@ -119,6 +119,25 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr)
+{
+	if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
+	    (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
+		__update_mmu_cache(vma, address, ptep);
+}
+
 /*
  * When used, PTE_FRAG_NR is defined in subarch pgtable.h
...
@@ -178,8 +178,7 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
  *
  * This must always be called with the pte lock held.
  */
-void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
-		unsigned long address, pte_t *ptep, unsigned int nr)
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	if (is_vm_hugetlb_page(vma))
 		book3e_hugetlb_preload(vma, address, *ptep);
...
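For context, the external interface is unchanged: generic mm code still calls update_mmu_cache_range() after installing PTEs, and only powerpc's internal dispatch moves into the common wrapper. A minimal, purely illustrative caller (example_finish_fault is a made-up name, not part of this commit or of the generic mm code):

/* Hypothetical caller, showing the unchanged external interface. */
static void example_finish_fault(struct vm_fault *vmf, struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep, unsigned int nr)
{
	/* ... nr contiguous PTEs have just been installed ... */
	update_mmu_cache_range(vmf, vma, address, ptep, nr);
	/* On powerpc this now reaches __update_mmu_cache() only for hash MMUs
	 * or for E500 with hugetlb; in all other configurations it does nothing. */
}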