Commit 3fb69c6a authored by Christophe Leroy, committed by Michael Ellerman

powerpc/8xx: Enable 512k hugepage support with HW assistance

To use 512k pages with hardware assistance, the PTEs have to be spread
every 128 bytes in the L2 table.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 22569b88
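
To make the spreading concrete, here is a minimal userspace sketch (not part of the patch) of the index calculation that the new 8xx branch of hugepte_offset() performs. The PAGE_SHIFT, PTE size and pdshift values are assumptions picked to reproduce the 128-byte spacing mentioned above; the real values come from the kernel configuration.

#include <stdio.h>

/* Hypothetical values for illustration only: 16k base pages and 4-byte
 * PTEs on a 32-bit 8xx; the real values depend on the kernel config.
 */
#define PAGE_SHIFT	14			/* assumed 16k base pages */
#define PTE_SIZE	4			/* assumed bytes per L2 (PTE) entry */
#define SZ_512K		(512 * 1024)

/* Mirrors the new 8xx branch of hugepte_offset(): index the hugepd page
 * with PAGE_SHIFT instead of hugepd_shift(), so the 512k PTE sits in the
 * slot the hardware tablewalk expects.
 */
static unsigned long hugepte_idx(unsigned long addr, unsigned int pdshift)
{
	return (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned int pdshift = 22;	/* assumed: one pgdir entry maps 4M */
	unsigned long a;

	/* Two consecutive 512k hugepages inside the same 4M region. */
	for (a = 0; a < 2UL * SZ_512K; a += SZ_512K)
		printf("addr 0x%06lx -> idx %lu (offset %lu bytes)\n",
		       a, hugepte_idx(a, pdshift),
		       hugepte_idx(a, pdshift) * PTE_SIZE);

	return 0;
}

With these assumed values, two consecutive 512k pages land 32 entries (128 bytes) apart in the L2 table, which is why the PTEs are spread rather than packed.
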
@@ -74,7 +74,9 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
 	unsigned long idx = 0;
 	pte_t *dir = hugepd_page(hpd);
-#ifndef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_8xx
+	idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+#elif !defined(CONFIG_PPC_FSL_BOOK3E)
 	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
 #endif
...
@@ -65,6 +65,9 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	if (pshift >= pdshift) {
 		cachep = PGT_CACHE(PTE_T_ORDER);
 		num_hugepd = 1 << (pshift - pdshift);
+	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
+		cachep = PGT_CACHE(PTE_INDEX_SIZE);
+		num_hugepd = 1;
 	} else {
 		cachep = PGT_CACHE(pdshift - pshift);
 		num_hugepd = 1;
@@ -331,6 +334,9 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	if (shift >= pdshift)
 		hugepd_free(tlb, hugepte);
+	else if (IS_ENABLED(CONFIG_PPC_8xx))
+		pgtable_free_tlb(tlb, hugepte,
+				 get_hugepd_cache_index(PTE_INDEX_SIZE));
 	else
 		pgtable_free_tlb(tlb, hugepte,
 				 get_hugepd_cache_index(pdshift - shift));
@@ -700,7 +706,9 @@ static int __init hugetlbpage_init(void)
 		 * if we have pdshift and shift value same, we don't
 		 * use pgt cache for hugepd.
 		 */
-		if (pdshift > shift)
+		if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
+			pgtable_cache_add(PTE_INDEX_SIZE);
+		else if (pdshift > shift)
 			pgtable_cache_add(pdshift - shift);
 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
 		else
...
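
The three hunks above encode the same rule: below pdshift, an 8xx hugepd is a full-size L2 table, so allocation, freeing and cache registration all use PTE_INDEX_SIZE rather than pdshift - shift. A condensed, illustrative sketch of that rule follows (standalone, with placeholder constants that are not the kernel's real definitions):

#include <stdio.h>
#include <stdbool.h>

/* Illustration only: PTE_INDEX_SIZE and PTE_T_ORDER stand in for the
 * kernel's definitions.
 */
#define PTE_INDEX_SIZE	10	/* assumed: index size of a full L2 (PTE) table */
#define PTE_T_ORDER	0	/* placeholder */

static int hugepd_cache_index(unsigned int pshift, unsigned int pdshift,
			      bool is_8xx)
{
	if (pshift >= pdshift)
		return PTE_T_ORDER;	/* hugepage at least as big as a pgdir slot */
	if (is_8xx)
		return PTE_INDEX_SIZE;	/* 512k PTEs live in a full-size L2 table */
	return pdshift - pshift;	/* reduced table on other platforms */
}

int main(void)
{
	/* 512k page (shift 19) under an assumed 4M pgdir slot (pdshift 22). */
	printf("8xx:   PGT_CACHE(%d)\n", hugepd_cache_index(19, 22, true));
	printf("other: PGT_CACHE(%d)\n", hugepd_cache_index(19, 22, false));
	return 0;
}
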
@@ -97,6 +97,9 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 		.shift = 14,
 	},
 #endif
+	[MMU_PAGE_512K] = {
+		.shift = 19,
+	},
 	[MMU_PAGE_8M] = {
 		.shift = 23,
 	},
...
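
For reference, the .shift values in mmu_psize_defs are log2 of the page size in bytes; a quick compile-time sanity check of the new entry (standalone C11, not part of the patch):

#include <assert.h>

/* 1 << 19 bytes is 512 KiB, matching the new MMU_PAGE_512K entry and in
 * line with its neighbours shown above (shift 14 -> 16 KiB, 23 -> 8 MiB).
 */
static_assert((1UL << 19) == 512 * 1024, "MMU_PAGE_512K is 512 KiB");
static_assert((1UL << 23) == 8 * 1024 * 1024, "MMU_PAGE_8M is 8 MiB");
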