Commit ebd31197 authored by Oliver O'Halloran, committed by Michael Ellerman

powerpc/mm: Add devmap support for ppc64

Add support for the devmap bit on PTEs and PMDs for PPC64 Book3S.  This
is used to differentiate device backed memory from transparent huge
pages since they are handled in more or less the same manner by the core
mm code.

Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent b584c254
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/mmdebug.h> #include <linux/mmdebug.h>
#include <linux/bug.h>
#endif #endif
/* /*
...@@ -79,6 +80,9 @@ ...@@ -79,6 +80,9 @@
#define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */ #define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */
#define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */ #define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
#define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */
#define __HAVE_ARCH_PTE_DEVMAP
/* /*
* Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE * Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
* Instead of fixing all of them, add an alternate define which * Instead of fixing all of them, add an alternate define which
...@@ -599,6 +603,16 @@ static inline pte_t pte_mkhuge(pte_t pte) ...@@ -599,6 +603,16 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte; return pte;
} }
/*
 * Mark a PTE as mapping ZONE_DEVICE memory. _PAGE_SPECIAL is set along
 * with _PAGE_DEVMAP so generic code that only understands special pages
 * still treats device mappings conservatively.
 */
static inline pte_t pte_mkdevmap(pte_t pte)
{
	unsigned long ptev = pte_val(pte);

	ptev |= _PAGE_DEVMAP | _PAGE_SPECIAL;
	return __pte(ptev);
}
/*
 * Test whether a PTE maps ZONE_DEVICE memory, i.e. whether
 * _PAGE_DEVMAP is set. pte_raw() returns the big-endian raw value, so
 * the mask is byte-swapped with cpu_to_be64() before the AND.
 */
static inline int pte_devmap(pte_t pte)
{
	return (pte_raw(pte) & cpu_to_be64(_PAGE_DEVMAP)) != 0;
}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ {
/* FIXME!! check whether this need to be a conditional */ /* FIXME!! check whether this need to be a conditional */
...@@ -1146,6 +1160,37 @@ static inline bool arch_needs_pgtable_deposit(void) ...@@ -1146,6 +1160,37 @@ static inline bool arch_needs_pgtable_deposit(void)
return true; return true;
} }
/*
 * Mark a PMD as a device (ZONE_DEVICE) huge mapping. _PAGE_PTE is set
 * as well so the entry is recognised as a leaf rather than a pointer to
 * a page table.
 */
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	unsigned long pmdv = pmd_val(pmd);

	pmdv |= _PAGE_PTE | _PAGE_DEVMAP;
	return __pmd(pmdv);
}
/*
 * Test whether a PMD is a device (ZONE_DEVICE) mapping by reusing the
 * PTE-level predicate on the PMD's pte view.
 */
static inline int pmd_devmap(pmd_t pmd)
{
	pte_t pte = pmd_pte(pmd);

	return pte_devmap(pte);
}
/* Device mappings are never installed at the PUD level on this platform. */
static inline int pud_devmap(pud_t pud)
{
return 0;
}
/* Device mappings are never installed at the PGD level on this platform. */
static inline int pgd_devmap(pgd_t pgd)
{
return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * pud_pfn() is only ever called behind a pud_devmap() check, and
 * pud_devmap() always returns 0 here, so this has no legitimate
 * callers. BUILD_BUG() makes any new user fail at compile time so we
 * find out about it.
 *
 * Note: the return type is plain int — a const qualifier on a scalar
 * return value is meaningless and provokes -Wignored-qualifiers.
 */
static inline int pud_pfn(pud_t pud)
{
	BUILD_BUG();
	return 0;
}
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */ #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
...@@ -252,7 +252,7 @@ static inline int radix__pgd_bad(pgd_t pgd) ...@@ -252,7 +252,7 @@ static inline int radix__pgd_bad(pgd_t pgd)
static inline int radix__pmd_trans_huge(pmd_t pmd) static inline int radix__pmd_trans_huge(pmd_t pmd)
{ {
return !!(pmd_val(pmd) & _PAGE_PTE); return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
} }
static inline pmd_t radix__pmd_mkhuge(pmd_t pmd) static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
......
...@@ -964,7 +964,7 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, ...@@ -964,7 +964,7 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
if (pmd_none(pmd)) if (pmd_none(pmd))
return NULL; return NULL;
if (pmd_trans_huge(pmd)) { if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
if (is_thp) if (is_thp)
*is_thp = true; *is_thp = true;
ret_pte = (pte_t *) pmdp; ret_pte = (pte_t *) pmdp;
......
...@@ -32,7 +32,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, ...@@ -32,7 +32,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
{ {
int changed; int changed;
#ifdef CONFIG_DEBUG_VM #ifdef CONFIG_DEBUG_VM
WARN_ON(!pmd_trans_huge(*pmdp)); WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
assert_spin_locked(&vma->vm_mm->page_table_lock); assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif #endif
changed = !pmd_same(*(pmdp), entry); changed = !pmd_same(*(pmdp), entry);
...@@ -59,7 +59,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, ...@@ -59,7 +59,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
#ifdef CONFIG_DEBUG_VM #ifdef CONFIG_DEBUG_VM
WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp))); WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
assert_spin_locked(&mm->page_table_lock); assert_spin_locked(&mm->page_table_lock);
WARN_ON(!pmd_trans_huge(pmd)); WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif #endif
trace_hugepage_set_pmd(addr, pmd_val(pmd)); trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
......
...@@ -184,7 +184,7 @@ unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr ...@@ -184,7 +184,7 @@ unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr
unsigned long old; unsigned long old;
#ifdef CONFIG_DEBUG_VM #ifdef CONFIG_DEBUG_VM
WARN_ON(!pmd_trans_huge(*pmdp)); WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
assert_spin_locked(&mm->page_table_lock); assert_spin_locked(&mm->page_table_lock);
#endif #endif
...@@ -216,6 +216,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres ...@@ -216,6 +216,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp)); VM_BUG_ON(pmd_trans_huge(*pmdp));
VM_BUG_ON(pmd_devmap(*pmdp));
pmd = *pmdp; pmd = *pmdp;
pmd_clear(pmdp); pmd_clear(pmdp);
...@@ -296,6 +297,7 @@ void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma, ...@@ -296,6 +297,7 @@ void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
{ {
VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(REGION_ID(address) != USER_REGION_ID); VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
VM_BUG_ON(pmd_devmap(*pmdp));
/* /*
* We can't mark the pmd none here, because that will cause a race * We can't mark the pmd none here, because that will cause a race
......
...@@ -696,7 +696,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add ...@@ -696,7 +696,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add
unsigned long old; unsigned long old;
#ifdef CONFIG_DEBUG_VM #ifdef CONFIG_DEBUG_VM
WARN_ON(!radix__pmd_trans_huge(*pmdp)); WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
assert_spin_locked(&mm->page_table_lock); assert_spin_locked(&mm->page_table_lock);
#endif #endif
...@@ -714,6 +714,7 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre ...@@ -714,6 +714,7 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(radix__pmd_trans_huge(*pmdp)); VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
VM_BUG_ON(pmd_devmap(*pmdp));
/* /*
* khugepaged calls this for normal pmd * khugepaged calls this for normal pmd
*/ */
......
...@@ -324,7 +324,7 @@ struct page *pud_page(pud_t pud) ...@@ -324,7 +324,7 @@ struct page *pud_page(pud_t pud)
*/ */
struct page *pmd_page(pmd_t pmd) struct page *pmd_page(pmd_t pmd)
{ {
if (pmd_trans_huge(pmd) || pmd_huge(pmd)) if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
return pte_page(pmd_pte(pmd)); return pte_page(pmd_pte(pmd));
return virt_to_page(pmd_page_vaddr(pmd)); return virt_to_page(pmd_page_vaddr(pmd));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment