Commit 26a344ae authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm: Move hugetlb related headers

W.r.t. hugetlb, we support two formats for the pmd. With book3s_64 and
a 64K Linux page size, we can have a pte at the pmd level, hence we
don't need to support hugepd there. For everything else, hugepd is
supported and pmd_huge() is 0.
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 40e8550a
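For readers unfamiliar with the two entry formats the patch distinguishes, the sketch below is illustrative only and not part of the commit: a standalone C program showing how the low bits of an entry separate a leaf huge-page pte (bottom two bits != 00) from a hugepd pointer (bottom two bits == 00 with non-zero size bits). The type entry_t and the sample values are invented for the example, and HUGEPD_SHIFT_MASK is assumed to be 0x3f to mirror the kernel definition.

/*
 * Illustrative sketch only -- not kernel code and not part of this commit.
 * entry_t stands in for pmd_t/hugepd_t; HUGEPD_SHIFT_MASK is assumed 0x3f.
 */
#include <stdio.h>

typedef unsigned long entry_t;
#define HUGEPD_SHIFT_MASK 0x3f

static int is_leaf_huge_pte(entry_t e)
{
	/* leaf pte for a huge page: bottom two bits != 00 */
	return (e & 0x3) != 0x0;
}

static int is_hugepd_entry(entry_t e)
{
	/* hugepd pointer: bottom two bits == 00, next bits give the table size */
	return ((e & 0x3) == 0x0) && ((e & HUGEPD_SHIFT_MASK) != 0);
}

int main(void)
{
	entry_t leaf = 0x10001;	/* bottom bits 01 -> leaf huge-page pte */
	entry_t hpd  = 0x20010;	/* bottom bits 00, size bits set -> hugepd */

	printf("leaf: huge=%d hugepd=%d\n", is_leaf_huge_pte(leaf), is_hugepd_entry(leaf));
	printf("hpd:  huge=%d hugepd=%d\n", is_leaf_huge_pte(hpd), is_hugepd_entry(hpd));
	return 0;
}

This is why, in the 64K book3s hash header below, pmd_huge()/pud_huge()/pgd_huge() simply test the bottom two bits while hugepd_ok() can return 0 (or WARN under CONFIG_DEBUG_VM), whereas on the 4K and non-book3s variants the hugepd check is the one that does the real work.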
@@ -93,6 +93,37 @@ extern struct page *pgd_page(pgd_t pgd);
#define remap_4k_pfn(vma, addr, pfn, prot) \
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
#ifdef CONFIG_HUGETLB_PAGE
/*
* For 4k page size, we support explicit hugepages via hugepd
*/
static inline int pmd_huge(pmd_t pmd)
{
return 0;
}
static inline int pud_huge(pud_t pud)
{
return 0;
}
static inline int pgd_huge(pgd_t pgd)
{
return 0;
}
#define pgd_huge pgd_huge
static inline int hugepd_ok(hugepd_t hpd)
{
/*
* hugepd pointer, bottom two bits == 00 and next 4 bits
* indicate size of table
*/
return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
}
#define is_hugepd(hpd) (hugepd_ok(hpd))
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
@@ -119,6 +119,57 @@ extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd })))
#define pte_pgd(pte) ((pgd_t)pte_pud(pte))
#ifdef CONFIG_HUGETLB_PAGE
/*
* We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
* a 16GB hugepage pte in the PGD and a 16MB hugepage pte at the PMD.
*
* Defined in such a way that we can optimize away the code block at build
* time if CONFIG_HUGETLB_PAGE=n.
*/
static inline int pmd_huge(pmd_t pmd)
{
/*
* leaf pte for huge page, bottom two bits != 00
*/
return ((pmd_val(pmd) & 0x3) != 0x0);
}
static inline int pud_huge(pud_t pud)
{
/*
* leaf pte for huge page, bottom two bits != 00
*/
return ((pud_val(pud) & 0x3) != 0x0);
}
static inline int pgd_huge(pgd_t pgd)
{
/*
* leaf pte for huge page, bottom two bits != 00
*/
return ((pgd_val(pgd) & 0x3) != 0x0);
}
#define pgd_huge pgd_huge
#ifdef CONFIG_DEBUG_VM
extern int hugepd_ok(hugepd_t hpd);
#define is_hugepd(hpd) (hugepd_ok(hpd))
#else
/*
* With a 64k page size, we have hugepage ptes in the pgd and pmd entries. We
* don't need to set up a hugepage directory for them; our pte and page
* directory format allows this.
*/
static inline int hugepd_ok(hugepd_t hpd)
{
return 0;
}
#define is_hugepd(pdep) 0
#endif /* CONFIG_DEBUG_VM */
#endif /* CONFIG_HUGETLB_PAGE */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
@@ -223,5 +223,30 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
return (hpd.pd > 0);
}
static inline int pmd_huge(pmd_t pmd)
{
return 0;
}
static inline int pud_huge(pud_t pud)
{
return 0;
}
static inline int pgd_huge(pgd_t pgd)
{
return 0;
}
#define pgd_huge pgd_huge
#define is_hugepd(hpd) (hugepd_ok(hpd))
#endif
#endif /* __ASSEMBLY__ */
#endif
@@ -387,45 +387,11 @@ typedef unsigned long pgprot_t;
typedef struct { signed long pd; } hugepd_t;
-#ifdef CONFIG_HUGETLB_PAGE
-#ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_PPC_64K_PAGES
-/*
-* With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't
-* need to setup hugepage directory for them. Our pte and page directory format
-* enable us to have this enabled. But to avoid errors when implementing new
-* features disable hugepd for 64K. We enable a debug version here, So we catch
-* wrong usage.
-*/
-#ifdef CONFIG_DEBUG_VM
-extern int hugepd_ok(hugepd_t hpd);
-#else
-#define hugepd_ok(x) (0)
-#endif
-#else
-static inline int hugepd_ok(hugepd_t hpd)
-{
-/*
-* hugepd pointer, bottom two bits == 00 and next 4 bits
-* indicate size of table
-*/
-return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
-}
-#endif
-#else
-static inline int hugepd_ok(hugepd_t hpd)
-{
-return (hpd.pd > 0);
-}
-#endif
-#define is_hugepd(hpd) (hugepd_ok(hpd))
-#define pgd_huge pgd_huge
-int pgd_huge(pgd_t pgd);
-#else /* CONFIG_HUGETLB_PAGE */
-#define is_hugepd(pdep) 0
-#define pgd_huge(pgd) 0
+#ifndef CONFIG_HUGETLB_PAGE
+#define is_hugepd(pdep) (0)
+#define pgd_huge(pgd) (0)
#endif /* CONFIG_HUGETLB_PAGE */
#define __hugepd(x) ((hugepd_t) { (x) })
struct page;
...
@@ -114,3 +114,21 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
*ptep = __pte(new_pte & ~_PAGE_BUSY);
return 0;
}
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM)
/*
* This enables us to catch the wrong page directory format
* Moved here so that we can use WARN() in the call.
*/
int hugepd_ok(hugepd_t hpd)
{
bool is_hugepd;
/*
* We should not find this format in page directory, warn otherwise.
*/
is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
WARN(is_hugepd, "Found wrong page directory format\n");
return 0;
}
#endif
@@ -53,78 +53,6 @@ static unsigned nr_gpages;
#define hugepd_none(hpd) ((hpd).pd == 0)
#ifdef CONFIG_PPC_BOOK3S_64
/*
* At this point we do the placement change only for BOOK3S 64. This would
* possibly work on other subarchs.
*/
/*
* We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have
* 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
*
* Defined in such a way that we can optimize away code block at build time
* if CONFIG_HUGETLB_PAGE=n.
*/
int pmd_huge(pmd_t pmd)
{
/*
* leaf pte for huge page, bottom two bits != 00
*/
return ((pmd_val(pmd) & 0x3) != 0x0);
}
int pud_huge(pud_t pud)
{
/*
* leaf pte for huge page, bottom two bits != 00
*/
return ((pud_val(pud) & 0x3) != 0x0);
}
int pgd_huge(pgd_t pgd)
{
/*
* leaf pte for huge page, bottom two bits != 00
*/
return ((pgd_val(pgd) & 0x3) != 0x0);
}
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM)
/*
* This enables us to catch the wrong page directory format
* Moved here so that we can use WARN() in the call.
*/
int hugepd_ok(hugepd_t hpd)
{
bool is_hugepd;
/*
* We should not find this format in page directory, warn otherwise.
*/
is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
WARN(is_hugepd, "Found wrong page directory format\n");
return 0;
}
#endif
#else
int pmd_huge(pmd_t pmd)
{
return 0;
}
int pud_huge(pud_t pud)
{
return 0;
}
int pgd_huge(pgd_t pgd)
{
return 0;
}
#endif
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
/* Only called for hugetlbfs pages, hence can ignore THP */
...