Commit 34536d78 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/8xx: Add a function to early map kernel via huge pages

Add a function to early map kernel memory using huge pages.

For 512k pages, just use the standard page table and map them in using
512k pages.

For 8M pages, create a hugepd table and populate the two PGD entries
with it. On the 8xx, a PGD entry covers 4 Mbytes, so an 8M page spans
two consecutive PGD entries, both of which must point to the same
hugepd table.

This function can only be used to create page tables at startup. Once
the regular SLAB allocation functions replace the memblock functions,
this function can no longer allocate new pages. However, it can still
update existing mappings with new protections.
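
As an illustration (not part of the patch), a sketch of the two ways
the helper is meant to be called; va, pa and the protection values
below are placeholder arguments:

	/* At boot, before SLAB is up: create a brand new 8M mapping. */
	__early_map_kernel_hugepage(va, pa, PAGE_KERNEL, MMU_PAGE_8M, true);

	/* Once SLAB has replaced memblock: only re-protect an existing mapping. */
	__early_map_kernel_hugepage(va, pa, PAGE_KERNEL_RO, MMU_PAGE_8M, false);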

The hugepd_none() macro is moved into asm/hugetlb.h so it can be used
outside of mm/hugetlbpage.c.
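
For reference, hugepd_none() is just a test that the hugepd entry is
empty, matching the hpd_val(*pmdp) == 0 check in the new code below;
a minimal sketch of the macro as commonly defined (check asm/hugetlb.h
for the exact definition):

	#define hugepd_none(hpd)	(hpd_val(hpd) == 0)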

early_pte_alloc_kernel() is made visible.

_PAGE_HUGE flag is now displayed by ptdump.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
[mpe: Change ptdump display to use "huge"]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/68325bcd3b6f93127f7810418a2352c3519066d6.1589866984.git.christophe.leroy@csgroup.eu
parent c8bef10a
@@ -35,6 +35,11 @@ static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
 	*hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M);
 }
 
+static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+	*hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M);
+}
+
 static inline int check_and_get_huge_psize(int shift)
 {
 	return shift_to_mmu_psize(shift);
...
@@ -107,6 +107,8 @@ unsigned long vmalloc_to_phys(void *vmalloc_addr);
 void pgtable_cache_add(unsigned int shift);
 
+pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);
+
 #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
 void mark_initmem_nx(void);
 #else
...
@@ -9,9 +9,11 @@
 #include <linux/memblock.h>
 #include <linux/mmu_context.h>
+#include <linux/hugetlb.h>
 #include <asm/fixmap.h>
 #include <asm/code-patching.h>
 #include <asm/inst.h>
+#include <asm/pgalloc.h>
 
 #include <mm/mmu_decl.h>
...
@@ -55,6 +57,56 @@ unsigned long p_block_mapped(phys_addr_t pa)
 	return 0;
 }
 
+static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
+{
+	if (hpd_val(*pmdp) == 0) {
+		pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);
+
+		if (!ptep)
+			return NULL;
+
+		hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
+		hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
+	}
+	return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
+}
+
+static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
+					     pgprot_t prot, int psize, bool new)
+{
+	pmd_t *pmdp = pmd_ptr_k(va);
+	pte_t *ptep;
+
+	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
+		return -EINVAL;
+
+	if (new) {
+		if (WARN_ON(slab_is_available()))
+			return -EINVAL;
+
+		if (psize == MMU_PAGE_512K)
+			ptep = early_pte_alloc_kernel(pmdp, va);
+		else
+			ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
+	} else {
+		if (psize == MMU_PAGE_512K)
+			ptep = pte_offset_kernel(pmdp, va);
+		else
+			ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
+	}
+
+	if (WARN_ON(!ptep))
+		return -ENOMEM;
+
+	/* The PTE should never be already present */
+	if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
+		return -EINVAL;
+
+	set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));
+
+	return 0;
+}
+
 /*
  * MMU_init_hw does the chip-specific initialization of the MMU hardware.
  */
...
@@ -61,7 +61,7 @@ static void __init *early_alloc_pgtable(unsigned long size)
 	return ptr;
 }
 
-static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
 {
 	if (pmd_none(*pmdp)) {
 		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
...
@@ -11,6 +11,11 @@
 static const struct flag_info flag_array[] = {
 	{
+		.mask	= _PAGE_HUGE,
+		.val	= _PAGE_HUGE,
+		.set	= "huge",
+		.clear	= "    ",
+	}, {
 		.mask	= _PAGE_SH,
 		.val	= 0,
 		.set	= "user",
...
@@ -56,6 +56,7 @@ config PPC_8xx
 	select PPC_HAVE_KUEP
 	select PPC_HAVE_KUAP
 	select HAVE_ARCH_VMAP_STACK
+	select HUGETLBFS
 
 config 40x
 	bool "AMCC 40x"
...