Commit 7aa9a23c authored by Kirill A. Shutemov, committed by Linus Torvalds

powerpc, thp: remove infrastructure for handling splitting PMDs

With the new refcounting we don't need to mark PMDs as splitting, so drop
the code that handles it.

pmdp_splitting_flush() is no longer needed either: when splitting a PMD we
now do pmdp_clear_flush() + set_pte_at(), and pmdp_clear_flush() issues the
IPIs needed for fast_gup.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b2787370
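
The sketch below illustrates the replacement path the commit message describes: the huge PMD is cleared and flushed, then repopulated as a page table of regular PTEs, with no intermediate _PAGE_SPLITTING state. This is a hedged, simplified illustration and not code from this commit; demo_split_pmd() is an invented name, the surrounding locking is assumed to be held by the caller, and pmdp_clear_flush()/set_pte_at() are the primitives named in the message (the clear+flush helper may be spelled pmdp_huge_clear_flush() depending on the tree).

/*
 * Hedged illustration only -- not part of this commit.  The real split
 * code lives in mm/huge_memory.c; this just shows the shape of the
 * pmdp_clear_flush() + set_pte_at() sequence.
 */
#include <linux/mm.h>
#include <linux/huge_mm.h>

static void demo_split_pmd(struct vm_area_struct *vma, pmd_t *pmdp,
			   unsigned long haddr, struct page *page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	/*
	 * Clear and flush the huge PMD.  On powerpc the flush sends the
	 * IPIs that serialize against fast GUP, which is why a separate
	 * pmdp_splitting_flush() is no longer required.
	 */
	pmdp_clear_flush(vma, haddr, pmdp);

	/* Withdraw the deposited page table and fill it with small PTEs. */
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte = pte_offset_map(&_pmd, haddr);

		set_pte_at(mm, haddr, pte, mk_pte(page + i, vma->vm_page_prot));
		pte_unmap(pte);
	}

	/* Finally make the PMD point at the newly filled PTE page. */
	pmd_populate(mm, pmdp, pgtable);
}

Because the cleared PMD becomes visible to every CPU once the flush's IPIs have been handled, generic code no longer needs a per-PMD "splitting" flag to fence off concurrent walkers, which is what the diff below removes.
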
@@ -256,13 +256,6 @@ static inline int pmd_trans_huge(pmd_t pmd)
 		     (_PAGE_PTE | _PAGE_THP_HUGE));
 }
 
-static inline int pmd_trans_splitting(pmd_t pmd)
-{
-	if (pmd_trans_huge(pmd))
-		return pmd_val(pmd) & _PAGE_SPLITTING;
-	return 0;
-}
-
 static inline int pmd_large(pmd_t pmd)
 {
 	return !!(pmd_val(pmd) & _PAGE_PTE);
@@ -273,11 +266,6 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 	return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
 }
 
-static inline pmd_t pmd_mksplitting(pmd_t pmd)
-{
-	return __pmd(pmd_val(pmd) | _PAGE_SPLITTING);
-}
-
 #define __HAVE_ARCH_PMD_SAME
 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 {
...
@@ -40,11 +40,6 @@
 #define _PAGE_SOFT_DIRTY	0x00000
 #endif
 
-/*
- * THP pages can't be special. So use the _PAGE_SPECIAL
- */
-#define _PAGE_SPLITTING		_PAGE_SPECIAL
-
 /*
  * We need to differentiate between explicit huge page and THP huge
  * page, since THP huge page also need to track real subpage details
@@ -54,9 +49,8 @@
 /*
  * set of bits not changed in pmd_modify.
  */
-#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \
-			 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
-			 _PAGE_THP_HUGE | _PAGE_PTE | _PAGE_SOFT_DIRTY)
+#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | _PAGE_THP_HUGE)
 
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/book3s/64/hash-64k.h>
...
@@ -266,10 +266,6 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
 extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 				     unsigned long addr, pmd_t *pmdp);
 
-#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern void pmdp_splitting_flush(struct vm_area_struct *vma,
-				 unsigned long address, pmd_t *pmdp);
-
 extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 				 unsigned long address, pmd_t *pmdp);
 #define pmdp_collapse_flush pmdp_collapse_flush
...
@@ -39,9 +39,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		/* If PMD busy, retry the access */
 		if (unlikely(old_pmd & _PAGE_BUSY))
 			return 0;
-		/* If PMD is trans splitting retry the access */
-		if (unlikely(old_pmd & _PAGE_SPLITTING))
-			return 0;
 		/* If PMD permissions don't match, take page fault */
 		if (unlikely(access & ~old_pmd))
 			return 1;
...
@@ -958,10 +958,6 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 		/*
 		 * A hugepage collapse is captured by pmd_none, because
 		 * it mark the pmd none and do a hpte invalidate.
-		 *
-		 * We don't worry about pmd_trans_splitting here, The
-		 * caller if it needs to handle the splitting case
-		 * should check for that.
 		 */
 		if (pmd_none(pmd))
 			return NULL;
...
@@ -603,55 +603,6 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
 	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
 }
 
-/*
- * We mark the pmd splitting and invalidate all the hpte
- * entries for this hugepage.
- */
-void pmdp_splitting_flush(struct vm_area_struct *vma,
-			  unsigned long address, pmd_t *pmdp)
-{
-	unsigned long old, tmp;
-
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-
-#ifdef CONFIG_DEBUG_VM
-	WARN_ON(!pmd_trans_huge(*pmdp));
-	assert_spin_locked(&vma->vm_mm->page_table_lock);
-#endif
-
-#ifdef PTE_ATOMIC_UPDATES
-	__asm__ __volatile__(
-	"1:	ldarx	%0,0,%3\n\
-		andi.	%1,%0,%6\n\
-		bne-	1b \n\
-		oris	%1,%0,%4@h \n\
-		stdcx.	%1,0,%3 \n\
-		bne-	1b"
-	: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
-	: "r" (pmdp), "i" (_PAGE_SPLITTING), "m" (*pmdp), "i" (_PAGE_BUSY)
-	: "cc" );
-#else
-	old = pmd_val(*pmdp);
-	*pmdp = __pmd(old | _PAGE_SPLITTING);
-#endif
-	/*
-	 * If we didn't had the splitting flag set, go and flush the
-	 * HPTE entries.
-	 */
-	trace_hugepage_splitting(address, old);
-	if (!(old & _PAGE_SPLITTING)) {
-		/* We need to flush the hpte */
-		if (old & _PAGE_HASHPTE)
-			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
-	}
-	/*
-	 * This ensures that generic code that rely on IRQ disabling
-	 * to prevent a parallel THP split work as expected.
-	 */
-	kick_all_cpus_sync();
-}
-
 /*
  * We want to put the pgtable in pmd and use pgtable for tracking
  * the base page size hptes
...
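
The kick_all_cpus_sync() removed above upheld the contract that the deleted comment describes: fast GUP walks page tables with interrupts disabled, so a THP split must not complete while a walker is mid-walk. The hedged sketch below (demo_gup_fast_pmd() is an invented name; it only approximates the pattern used by the fast-GUP code in mm/gup.c) shows why a PMD clear backed by an IPI-sending flush is sufficient and no _PAGE_SPLITTING check is needed.

/*
 * Hedged illustration only.  A racing split has either not cleared the
 * PMD yet (we still see the huge PMD) or has already cleared it (we see
 * pmd_none() and fall back to the slow path); the flush's IPI cannot be
 * serviced on this CPU until interrupts are re-enabled.
 */
#include <linux/mm.h>

static int demo_gup_fast_pmd(pmd_t *pmdp)
{
	unsigned long flags;
	pmd_t pmd;
	int ret = 0;

	local_irq_save(flags);
	pmd = READ_ONCE(*pmdp);

	if (pmd_none(pmd) || !pmd_trans_huge(pmd))
		goto out;	/* empty or not huge: let the slow path decide */

	/*
	 * Still a huge PMD: references on the subpages would be taken
	 * here.  No pmd_trans_splitting() test is required any more.
	 */
	ret = 1;
out:
	local_irq_restore(flags);
	return ret;
}
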