Commit 8a6c697b authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Implement helpers for pagetable fragment support at PMD level

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 0c4d2680
@@ -42,6 +42,8 @@
 /* 8 bytes per each pte entry */
 #define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3)
 #define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
+#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3)
+#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
 
 /* memory key bits, only 8 keys supported */
 #define H_PTE_PKEY_BIT0 0
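To make the fragment arithmetic concrete: a PMD entry is 8 bytes, so a table of 2^H_PMD_INDEX_SIZE entries occupies 2^(H_PMD_INDEX_SIZE + 3) bytes, and H_PMD_FRAG_NR is simply how many such tables fit in one page. A small sketch, with the 4K hash value assumed for illustration (H_PMD_INDEX_SIZE == 7 is not spelled out in this diff):

/* Illustrative only: assumes H_PMD_INDEX_SIZE == 7 and PAGE_SIZE == 4096 */
#define EX_PAGE_SIZE           4096UL
#define EX_PMD_INDEX_SIZE      7                       /* assumed value */
#define EX_PMD_FRAG_SIZE_SHIFT (EX_PMD_INDEX_SIZE + 3) /* 10: a PMD table is 1K */
#define EX_PMD_FRAG_NR         (EX_PAGE_SIZE >> EX_PMD_FRAG_SIZE_SHIFT) /* 4 per page */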
@@ -46,6 +46,13 @@
 #define H_PTE_FRAG_SIZE_SHIFT (H_PTE_INDEX_SIZE + 3 + 1)
 #define H_PTE_FRAG_NR (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
 
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3 + 1)
+#else
+#define H_PMD_FRAG_SIZE_SHIFT (H_PMD_INDEX_SIZE + 3)
+#endif
+#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
+
 #ifndef __ASSEMBLY__
 #include <asm/errno.h>
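On the 64K hash config the shift gains an extra "+ 1" when transparent hugepages or hugetlb are enabled, doubling each PMD table's footprint and halving the number of fragments per page. Illustratively, assuming H_PMD_INDEX_SIZE == 10 (a value taken from the 64K hash config of this era, not from this diff):

/* Illustrative only: assumes H_PMD_INDEX_SIZE == 10 and PAGE_SIZE == 65536 */
/* THP/hugetlb enabled:  shift = 10 + 3 + 1 = 14 -> 16K fragments, 4 per page */
/* THP/hugetlb disabled: shift = 10 + 3     = 13 ->  8K fragments, 8 per page */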
@@ -138,6 +138,7 @@ typedef struct {
 	 * pagetable fragment support
 	 */
 	void *pte_frag;
+	void *pmd_frag;
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 	struct list_head iommu_group_mem_list;
 #endif
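The new pmd_frag pointer, like pte_frag before it, is a cursor into the mm's current fragment page: it points at the next fragment to hand out, so its offset within the page encodes how many fragments are already in use. A minimal sketch of that invariant (this helper is ours, not part of the patch):

/* Illustrative kernel-context helper: how many fragments have been
 * handed out from the page a frag cursor points into. */
static inline int frags_consumed(void *frag, unsigned long frag_size_shift)
{
	return ((unsigned long)frag & ~PAGE_MASK) >> frag_size_shift;
}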
@@ -42,7 +42,9 @@ extern struct kmem_cache *pgtable_cache[];
 })
 
 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
+extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
 extern void pte_fragment_free(unsigned long *, int);
+extern void pmd_fragment_free(unsigned long *);
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
 #ifdef CONFIG_SMP
 extern void __tlb_remove_table(void *_table);
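This patch only introduces the helpers; hooking them into the generic pgalloc entry points is left to a follow-up change. A sketch of what that wiring could look like (the pmd_alloc_one()/pmd_free() bodies below are an assumption, not part of this diff):

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}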
@@ -246,6 +246,12 @@ extern unsigned long __pte_frag_size_shift;
 #define PTE_FRAG_SIZE_SHIFT __pte_frag_size_shift
 #define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
 
+extern unsigned long __pmd_frag_nr;
+#define PMD_FRAG_NR __pmd_frag_nr
+extern unsigned long __pmd_frag_size_shift;
+#define PMD_FRAG_SIZE_SHIFT __pmd_frag_size_shift
+#define PMD_FRAG_SIZE (1UL << PMD_FRAG_SIZE_SHIFT)
+
 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
 #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
 #define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
@@ -15,4 +15,7 @@
 #define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3)
 #define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
 
+#define RADIX_PMD_FRAG_SIZE_SHIFT (RADIX_PMD_INDEX_SIZE + 3)
+#define RADIX_PMD_FRAG_NR (PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT)
+
 #endif /* _ASM_POWERPC_PGTABLE_RADIX_4K_H */
@@ -16,4 +16,8 @@
  */
 #define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3)
 #define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
+
+#define RADIX_PMD_FRAG_SIZE_SHIFT (RADIX_PMD_INDEX_SIZE + 3)
+#define RADIX_PMD_FRAG_NR (PAGE_SIZE >> RADIX_PMD_FRAG_SIZE_SHIFT)
 #endif /* _ASM_POWERPC_PGTABLE_RADIX_64K_H */
@@ -1012,6 +1012,8 @@ void __init hash__early_init_mmu(void)
 	 */
 	__pte_frag_nr = H_PTE_FRAG_NR;
 	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
+	__pmd_frag_nr = H_PMD_FRAG_NR;
+	__pmd_frag_size_shift = H_PMD_FRAG_SIZE_SHIFT;
 
 	__pte_index_size = H_PTE_INDEX_SIZE;
 	__pmd_index_size = H_PMD_INDEX_SIZE;
@@ -160,6 +160,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	mm->context.id = index;
 
 	mm->context.pte_frag = NULL;
+	mm->context.pmd_frag = NULL;
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 	mm_iommu_init(mm);
 #endif
@@ -190,16 +191,11 @@ static void destroy_contexts(mm_context_t *ctx)
 	spin_unlock(&mmu_context_lock);
 }
 
-static void destroy_pagetable_page(struct mm_struct *mm)
+static void pte_frag_destroy(void *pte_frag)
 {
 	int count;
-	void *pte_frag;
 	struct page *page;
 
-	pte_frag = mm->context.pte_frag;
-	if (!pte_frag)
-		return;
-
 	page = virt_to_page(pte_frag);
 	/* drop all the pending references */
 	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
@@ -210,6 +206,35 @@ static void destroy_pagetable_page(struct mm_struct *mm)
 	}
 }
 
+static void pmd_frag_destroy(void *pmd_frag)
+{
+	int count;
+	struct page *page;
+
+	page = virt_to_page(pmd_frag);
+	/* drop all the pending references */
+	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
+	/* We allow PMD_FRAG_NR fragments from a PMD page */
+	if (page_ref_sub_and_test(page, PMD_FRAG_NR - count)) {
+		pgtable_pmd_page_dtor(page);
+		free_unref_page(page);
+	}
+}
+
+static void destroy_pagetable_page(struct mm_struct *mm)
+{
+	void *frag;
+
+	frag = mm->context.pte_frag;
+	if (frag)
+		pte_frag_destroy(frag);
+
+	frag = mm->context.pmd_frag;
+	if (frag)
+		pmd_frag_destroy(frag);
+}
+
 void destroy_context(struct mm_struct *mm)
 {
 #ifdef CONFIG_SPAPR_TCE_IOMMU
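The teardown accounting works because __alloc_for_pmdcache() seeds the page's refcount with PMD_FRAG_NR: every fragment handed out owns one reference, and the cursor's offset within the page says how many were handed out. destroy_pagetable_page() therefore only has to drop the references belonging to fragments that were never used. A worked trace, under assumed values:

/*
 * Worked trace (assumed values: PMD_FRAG_NR == 4, PMD_FRAG_SIZE == 1K):
 *
 *   __alloc_for_pmdcache(): set_page_count(page, 4); cursor = base + 1K
 *   get_pmd_from_cache():   returns base + 1K;       cursor = base + 2K
 *
 * At destroy time:
 *   count = (cursor & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT  == 2 handed out
 *   page_ref_sub_and_test(page, 4 - 2) drops the two unused references;
 *   the page itself is freed once the two live users each call
 *   pmd_fragment_free() and the refcount finally hits zero.
 */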
@@ -20,6 +20,11 @@
 #include "mmu_decl.h"
 #include <trace/events/thp.h>
 
+unsigned long __pmd_frag_nr;
+EXPORT_SYMBOL(__pmd_frag_nr);
+unsigned long __pmd_frag_size_shift;
+EXPORT_SYMBOL(__pmd_frag_size_shift);
+
 int (*register_process_table)(unsigned long base, unsigned long page_size,
 			      unsigned long tbl_size);
@@ -226,6 +231,85 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
 }
 EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
 
+static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
+{
+	void *pmd_frag, *ret;
+
+	spin_lock(&mm->page_table_lock);
+	ret = mm->context.pmd_frag;
+	if (ret) {
+		pmd_frag = ret + PMD_FRAG_SIZE;
+		/*
+		 * If we have taken up all the fragments mark PMD page NULL
+		 */
+		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
+			pmd_frag = NULL;
+		mm->context.pmd_frag = pmd_frag;
+	}
+	spin_unlock(&mm->page_table_lock);
+	return (pmd_t *)ret;
+}
+
+static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
+{
+	void *ret = NULL;
+	struct page *page;
+	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
+
+	if (mm == &init_mm)
+		gfp &= ~__GFP_ACCOUNT;
+	page = alloc_page(gfp);
+	if (!page)
+		return NULL;
+	if (!pgtable_pmd_page_ctor(page)) {
+		__free_pages(page, 0);
+		return NULL;
+	}
+
+	ret = page_address(page);
+	/*
+	 * If we support only one fragment just return the
+	 * allocated page.
+	 */
+	if (PMD_FRAG_NR == 1)
+		return ret;
+
+	spin_lock(&mm->page_table_lock);
+	/*
+	 * If we find pmd_frag already set (another task raced in and
+	 * installed a fragment page first), return the allocated page
+	 * with a single fragment count.
+	 */
+	if (likely(!mm->context.pmd_frag)) {
+		set_page_count(page, PMD_FRAG_NR);
+		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
+	}
+	spin_unlock(&mm->page_table_lock);
+
+	return (pmd_t *)ret;
+}
+
+pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
+{
+	pmd_t *pmd;
+
+	pmd = get_pmd_from_cache(mm);
+	if (pmd)
+		return pmd;
+
+	return __alloc_for_pmdcache(mm);
+}
+
+void pmd_fragment_free(unsigned long *pmd)
+{
+	struct page *page = virt_to_page(pmd);
+
+	if (put_page_testzero(page)) {
+		pgtable_pmd_page_dtor(page);
+		free_unref_page(page);
+	}
+}
+
 static pte_t *get_pte_from_cache(struct mm_struct *mm)
 {
 	void *pte_frag, *ret;
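Allocation mirrors the existing PTE-fragment scheme: the fast path takes the next slice from the per-mm cursor under page_table_lock, and only an empty cursor sends us to __alloc_for_pmdcache() for a fresh page. A usage sketch (the function below is illustrative, not from the patch):

/* Illustrative caller: two PMD tables carved from one backing page
 * when PMD_FRAG_NR > 1 (pmd_fragment_alloc() ignores the address). */
static void example_pmd_fragments(struct mm_struct *mm)
{
	pmd_t *a = pmd_fragment_alloc(mm, 0);	/* slow path: allocates a page */
	pmd_t *b = pmd_fragment_alloc(mm, 0);	/* fast path: next fragment */

	/* Each free drops one page reference; the backing page returns to
	 * the allocator only when every fragment reference is gone (the
	 * unused ones are dropped later in destroy_pagetable_page()). */
	pmd_fragment_free((unsigned long *)b);
	pmd_fragment_free((unsigned long *)a);
}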
@@ -640,6 +640,8 @@ void __init radix__early_init_mmu(void)
 #endif
 	__pte_frag_nr = RADIX_PTE_FRAG_NR;
 	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
+	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
+	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;
 
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
 		radix_init_native();