Commit d9642117 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: define empty update_mmu_cache() as static inline

Only BOOK3S and FSL_BOOK3E have a useful update_mmu_cache().

For the others, just define it static inline.

At the same time, simplify the FSL_BOOK3E related ifdef, as
book3e_hugetlb_preload() only exists when CONFIG_PPC_FSL_BOOK3E
is selected.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/668aba4db6b9af6d8a151174e11a4289f1a6bbcd.1565933217.git.christophe.leroy@c-s.fr
parent ad628a34
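The pattern the patch applies is worth spelling out: configs that implement the hook declare the real function in the header, and everyone else gets an empty static inline, so call sites compile unchanged, the no-op version vanishes under optimization, and (unlike an empty macro) argument type checking is preserved. Below is a minimal, standalone C sketch of the idea; the names arch_notify_pte_update() and CONFIG_HAS_MMU_HASH are hypothetical stand-ins for update_mmu_cache() and the real kernel config symbols.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's types. */
struct vm_area_struct { int id; };
typedef unsigned long pte_t;

#ifdef CONFIG_HAS_MMU_HASH	/* stand-in for CONFIG_PPC_BOOK3S / CONFIG_PPC_FSL_BOOK3E */
/* Configs that need the hook only declare it; one .c file defines it. */
void arch_notify_pte_update(struct vm_area_struct *vma,
			    unsigned long address, pte_t *ptep);
#else
/*
 * Everyone else gets an empty static inline: the call below still
 * type-checks its arguments, and the optimizer deletes it entirely.
 */
static inline void arch_notify_pte_update(struct vm_area_struct *vma,
					   unsigned long address, pte_t *ptep) {}
#endif

int main(void)
{
	struct vm_area_struct vma = { .id = 1 };
	pte_t pte = 0;

	/* The call site is identical on every configuration. */
	arch_notify_pte_update(&vma, 0x1000, &pte);
	printf("page fault handled for vma %d\n", vma.id);
	return 0;
}

Compiled without -DCONFIG_HAS_MMU_HASH this builds and runs as-is; with it, an out-of-line definition would have to be supplied, as mm/mem.c does in the diff below.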
@@ -26,5 +26,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				      unsigned long size, pgprot_t vma_prot);
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
 #endif /* __ASSEMBLY__ */
 #endif
@@ -293,5 +293,18 @@ static inline int pgd_huge(pgd_t pgd)
 #define is_hugepd(hpd)		(hugepd_ok(hpd))
 #endif
 
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ */
+#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+#else
+static inline
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif
@@ -77,18 +77,6 @@ extern void paging_init(void);
 
 #include <asm-generic/pgtable.h>
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_large(pmd)		0
 #endif
...
@@ -415,10 +415,10 @@ EXPORT_SYMBOL(flush_icache_user_range);
  *
  * This must always be called with the pte lock held.
  */
+#ifdef CONFIG_PPC_BOOK3S
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *ptep)
 {
-#ifdef CONFIG_PPC_BOOK3S
 	/*
 	 * We don't need to worry about _PAGE_PRESENT here because we are
 	 * called with either mm->page_table_lock held or ptl lock held
@@ -456,13 +456,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	}
 
 	hash_preload(vma->vm_mm, address, is_exec, trap);
+}
 #endif /* CONFIG_PPC_BOOK3S */
-#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
-	&& defined(CONFIG_HUGETLB_PAGE)
+#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		      pte_t *ptep)
+{
 	if (is_vm_hugetlb_page(vma))
 		book3e_hugetlb_preload(vma, address, *ptep);
-#endif
 }
+#endif
 
 /*
  * System memory should not be in /proc/iomem but various tools expect it
...