Commit 32ea4c14, authored by Christophe Leroy, committed by Michael Ellerman

powerpc/mm: Extend pte_fragment functionality to PPC32

In order to allow the 8xx to handle pte_fragments, this patch
extends the use of pte_fragments to PPC32 platforms.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent a74791dd
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ #ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ #define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
/* /*
* 32-bit hash table MMU support * 32-bit hash table MMU support
*/ */
...@@ -9,6 +10,8 @@ ...@@ -9,6 +10,8 @@
* BATs * BATs
*/ */
#include <asm/page.h>
/* Block size masks */ /* Block size masks */
#define BL_128K 0x000 #define BL_128K 0x000
#define BL_256K 0x001 #define BL_256K 0x001
...@@ -43,7 +46,7 @@ struct ppc_bat { ...@@ -43,7 +46,7 @@ struct ppc_bat {
u32 batl; u32 batl;
}; };
typedef struct page *pgtable_t; typedef pte_t *pgtable_t;
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
/* /*
......
...@@ -59,30 +59,31 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, ...@@ -59,30 +59,31 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
/*
 * Link a user PTE table into a PMD entry.
 *
 * With pte fragments on PPC32, pgtable_t is a pte_t * (kernel virtual
 * address of the fragment), so the PMD stores the fragment's physical
 * address directly instead of deriving it from a struct page.
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT);
}

/* pmd_pgtable() returns the kernel virtual address of the PTE fragment */
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
void pte_frag_destroy(void *pte_frag);
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
void pte_fragment_free(unsigned long *table, int kernel);
/*
 * Free a kernel PTE table: it is a pte fragment now, not a full page,
 * so hand it back to the fragment allocator (kernel == 1).
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}
/*
 * Free a user PTE table (a pte fragment; kernel == 0). The fragment
 * allocator drops the page only when its last fragment is released.
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}
static inline void pgtable_free(void *table, unsigned index_size) static inline void pgtable_free(void *table, unsigned index_size)
{ {
if (!index_size) { if (!index_size) {
pgtable_page_dtor(virt_to_page(table)); pte_fragment_free((unsigned long *)table, 0);
free_page((unsigned long)table);
} else { } else {
BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
kmem_cache_free(PGT_CACHE(index_size), table); kmem_cache_free(PGT_CACHE(index_size), table);
...@@ -120,6 +121,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, ...@@ -120,6 +121,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
/*
 * Queue a PTE table for freeing after the TLB flush. table is already
 * a pointer (pte fragment), so no page_address() conversion is needed.
 */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, table, 0);
}
#endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */ #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
...@@ -329,7 +329,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, ...@@ -329,7 +329,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
#define pmd_page_vaddr(pmd) \ #define pmd_page_vaddr(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \ #define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
...@@ -346,7 +346,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, ...@@ -346,7 +346,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define pte_offset_kernel(dir, addr) \ #define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr)) ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \ #define pte_offset_map(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr)) ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
(pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte) #define pte_unmap(pte) kunmap_atomic(pte)
/* /*
......
...@@ -223,7 +223,7 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, ...@@ -223,7 +223,7 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
return 0; return 0;
} }
#ifndef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_BOOK3E_64
static inline void arch_exit_mmap(struct mm_struct *mm) static inline void arch_exit_mmap(struct mm_struct *mm)
{ {
} }
......
...@@ -2,6 +2,8 @@ ...@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_NOHASH_32_MMU_H_ #ifndef _ASM_POWERPC_NOHASH_32_MMU_H_
#define _ASM_POWERPC_NOHASH_32_MMU_H_ #define _ASM_POWERPC_NOHASH_32_MMU_H_
#include <asm/page.h>
#if defined(CONFIG_40x) #if defined(CONFIG_40x)
/* 40x-style software loaded TLB */ /* 40x-style software loaded TLB */
#include <asm/nohash/32/mmu-40x.h> #include <asm/nohash/32/mmu-40x.h>
...@@ -17,7 +19,7 @@ ...@@ -17,7 +19,7 @@
#endif #endif
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
typedef struct page *pgtable_t; typedef pte_t *pgtable_t;
#endif #endif
#endif /* _ASM_POWERPC_NOHASH_32_MMU_H_ */ #endif /* _ASM_POWERPC_NOHASH_32_MMU_H_ */
...@@ -61,11 +61,10 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, ...@@ -61,11 +61,10 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
/*
 * Link a user PTE table into a PMD entry (non-booke: PMD holds the
 * physical address of the fragment, plus the user-access bit).
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT);
}

/* pmd_pgtable() returns the kernel virtual address of the PTE fragment */
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
#else #else
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
...@@ -77,31 +76,32 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, ...@@ -77,31 +76,32 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
/*
 * Link a user PTE table into a PMD entry (booke: the PMD holds the
 * kernel virtual address of the fragment, not a physical address).
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT);
}

/* pmd_pgtable() returns the kernel virtual address of the PTE fragment */
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
#endif #endif
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
void pte_frag_destroy(void *pte_frag);
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
void pte_fragment_free(unsigned long *table, int kernel);
/*
 * Free a kernel PTE table: a pte fragment (kernel == 1), released via
 * the fragment allocator rather than free_page().
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}
/*
 * Free a user PTE table (a pte fragment; kernel == 0). The backing
 * page is freed only when all fragments in it have been released.
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}
static inline void pgtable_free(void *table, unsigned index_size) static inline void pgtable_free(void *table, unsigned index_size)
{ {
if (!index_size) { if (!index_size) {
pgtable_page_dtor(virt_to_page(table)); pte_fragment_free((unsigned long *)table, 0);
free_page((unsigned long)table);
} else { } else {
BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
kmem_cache_free(PGT_CACHE(index_size), table); kmem_cache_free(PGT_CACHE(index_size), table);
...@@ -140,6 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, ...@@ -140,6 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address) unsigned long address)
{ {
tlb_flush_pgtable(tlb, address); tlb_flush_pgtable(tlb, address);
pgtable_free_tlb(tlb, page_address(table), 0); pgtable_free_tlb(tlb, table, 0);
} }
#endif /* _ASM_POWERPC_PGALLOC_32_H */ #endif /* _ASM_POWERPC_PGALLOC_32_H */
...@@ -333,12 +333,12 @@ static inline int pte_young(pte_t pte) ...@@ -333,12 +333,12 @@ static inline int pte_young(pte_t pte)
*/ */
#ifndef CONFIG_BOOKE #ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd) \ #define pmd_page_vaddr(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \ #define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else #else
#define pmd_page_vaddr(pmd) \ #define pmd_page_vaddr(pmd) \
((unsigned long) (pmd_val(pmd) & PAGE_MASK)) ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \ #define pmd_page(pmd) \
pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT)) pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif #endif
...@@ -357,7 +357,8 @@ static inline int pte_young(pte_t pte) ...@@ -357,7 +357,8 @@ static inline int pte_young(pte_t pte)
(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \ (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
pte_index(addr)) pte_index(addr))
#define pte_offset_map(dir, addr) \ #define pte_offset_map(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr)) ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
(pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte) #define pte_unmap(pte) kunmap_atomic(pte)
/* /*
......
...@@ -125,6 +125,10 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p) ...@@ -125,6 +125,10 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
ctx->pte_frag = p; ctx->pte_frag = p;
} }
#else #else
#define PTE_FRAG_NR 1
#define PTE_FRAG_SIZE_SHIFT PAGE_SHIFT
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
static inline void *pte_frag_get(mm_context_t *ctx) static inline void *pte_frag_get(mm_context_t *ctx)
{ {
return NULL; return NULL;
......
...@@ -18,6 +18,7 @@ obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o ...@@ -18,6 +18,7 @@ obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o
obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o \ obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o \
$(hash64-y) mmu_context_book3s64.o \ $(hash64-y) mmu_context_book3s64.o \
pgtable-book3s64.o pgtable-frag.o pgtable-book3s64.o pgtable-frag.o
obj-$(CONFIG_PPC32) += pgtable-frag.o
obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o
obj-$(CONFIG_PPC_BOOK3S_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o obj-$(CONFIG_PPC_BOOK3S_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
obj-$(CONFIG_PPC_BOOK3S) += tlb_hash$(BITS).o obj-$(CONFIG_PPC_BOOK3S) += tlb_hash$(BITS).o
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/sched/mm.h> #include <linux/sched/mm.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#if defined(CONFIG_PPC32) #if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk, static inline void switch_mm_pgdir(struct task_struct *tsk,
...@@ -97,3 +98,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, ...@@ -97,3 +98,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
switch_mmu_context(prev, next, tsk); switch_mmu_context(prev, next, tsk);
} }
#ifdef CONFIG_PPC32
/*
 * Called when an mm is torn down: release the partially-used pte
 * fragment cached in the mm context, if any, so it is not leaked.
 */
void arch_exit_mmap(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);
	if (frag)
		pte_frag_destroy(frag);
}
#endif
...@@ -385,6 +385,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) ...@@ -385,6 +385,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
#endif #endif
mm->context.id = MMU_NO_CONTEXT; mm->context.id = MMU_NO_CONTEXT;
mm->context.active = 0; mm->context.active = 0;
pte_frag_set(&mm->context, NULL);
return 0; return 0;
} }
...@@ -487,4 +488,3 @@ void __init mmu_context_init(void) ...@@ -487,4 +488,3 @@ void __init mmu_context_init(void)
next_context = FIRST_CONTEXT; next_context = FIRST_CONTEXT;
nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1; nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
} }
...@@ -45,32 +45,15 @@ extern char etext[], _stext[], _sinittext[], _einittext[]; ...@@ -45,32 +45,15 @@ extern char etext[], _stext[], _sinittext[], _einittext[];
/*
 * Allocate a kernel PTE table.
 *
 * Before the slab allocator is up, fall back to memblock (fragment-sized
 * and fragment-aligned); afterwards use the pte fragment allocator with
 * kernel == 1.
 */
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	if (!slab_is_available())
		return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);

	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
struct page *ptepage; return (pgtable_t)pte_fragment_alloc(mm, address, 0);
gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT;
ptepage = alloc_pages(flags, 0);
if (!ptepage)
return NULL;
if (!pgtable_page_ctor(ptepage)) {
__free_page(ptepage);
return NULL;
}
return ptepage;
} }
void __iomem * void __iomem *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment