Commit 025c5b24 authored by Naoya Horiguchi, committed by Linus Torvalds

thp: optimize away unnecessary page table locking

Currently, when we check whether we can handle a thp as it is or need to
split it into regular sized pages, we take the page table lock before
checking whether the given pmd maps a thp at all.  Because of this, when
the pmd is not a "huge pmd" we pay unnecessary lock/unlock overhead.  To
remove it, this patch introduces an optimized check function and replaces
several instances of similar logic with it.
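
For illustration, a minimal sketch of the caller pattern this converges on
(mirroring the smaps_pte_range() hunk below; declarations and the fallback
pte loop are omitted):

        if (pmd_trans_huge_lock(pmd, vma) == 1) {
                /* returned 1: *pmd maps a stable thp, page_table_lock is held */
                smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
                spin_unlock(&walk->mm->page_table_lock);
                mss->anonymous_thp += HPAGE_PMD_SIZE;
                return 0;
        }
        /* returned 0 or -1: the lock is not held here, fall back to the pte loop */

The helper only takes page_table_lock when pmd_trans_huge() already looks
true, so the common non-thp path avoids the lock/unlock round trip entirely.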

[akpm@linux-foundation.org: checkpatch fixes]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5aaabe83
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -394,21 +394,12 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
         pte_t *pte;
         spinlock_t *ptl;

-        spin_lock(&walk->mm->page_table_lock);
-        if (pmd_trans_huge(*pmd)) {
-                if (pmd_trans_splitting(*pmd)) {
-                        spin_unlock(&walk->mm->page_table_lock);
-                        wait_split_huge_page(vma->anon_vma, pmd);
-                } else {
-                        smaps_pte_entry(*(pte_t *)pmd, addr,
-                                        HPAGE_PMD_SIZE, walk);
-                        spin_unlock(&walk->mm->page_table_lock);
-                        mss->anonymous_thp += HPAGE_PMD_SIZE;
-                        return 0;
-                }
-        } else {
-                spin_unlock(&walk->mm->page_table_lock);
-        }
+        if (pmd_trans_huge_lock(pmd, vma) == 1) {
+                smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
+                spin_unlock(&walk->mm->page_table_lock);
+                mss->anonymous_thp += HPAGE_PMD_SIZE;
+                return 0;
+        }

         if (pmd_trans_unstable(pmd))
                 return 0;
@@ -705,11 +696,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
         /* find the first VMA at or above 'addr' */
         vma = find_vma(walk->mm, addr);
         spin_lock(&walk->mm->page_table_lock);
-        if (pmd_trans_huge(*pmd)) {
-                if (pmd_trans_splitting(*pmd)) {
-                        spin_unlock(&walk->mm->page_table_lock);
-                        wait_split_huge_page(vma->anon_vma, pmd);
-                } else {
+        if (pmd_trans_huge_lock(pmd, vma) == 1) {
                         for (; addr != end; addr += PAGE_SIZE) {
                                 unsigned long offset;
@@ -723,9 +710,6 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                         spin_unlock(&walk->mm->page_table_lock);
                         return err;
                 }
-        } else {
-                spin_unlock(&walk->mm->page_table_lock);
-        }

         for (; addr != end; addr += PAGE_SIZE) {
@@ -992,12 +976,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
         pte_t *pte;

         md = walk->private;
-        spin_lock(&walk->mm->page_table_lock);
-        if (pmd_trans_huge(*pmd)) {
-                if (pmd_trans_splitting(*pmd)) {
-                        spin_unlock(&walk->mm->page_table_lock);
-                        wait_split_huge_page(md->vma->anon_vma, pmd);
-                } else {
+        if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
                         pte_t huge_pte = *(pte_t *)pmd;
                         struct page *page;
@@ -1008,9 +988,6 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
                         spin_unlock(&walk->mm->page_table_lock);
                         return 0;
                 }
-        } else {
-                spin_unlock(&walk->mm->page_table_lock);
-        }

         if (pmd_trans_unstable(pmd))
                 return 0;
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -113,6 +113,18 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
                                     unsigned long start,
                                     unsigned long end,
                                     long adjust_next);
+extern int __pmd_trans_huge_lock(pmd_t *pmd,
+                                 struct vm_area_struct *vma);
+/* mmap_sem must be held on entry */
+static inline int pmd_trans_huge_lock(pmd_t *pmd,
+                                      struct vm_area_struct *vma)
+{
+        VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+        if (pmd_trans_huge(*pmd))
+                return __pmd_trans_huge_lock(pmd, vma);
+        else
+                return 0;
+}
 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                          unsigned long start,
                                          unsigned long end,
@@ -176,6 +188,11 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                          long adjust_next)
 {
 }
+static inline int pmd_trans_huge_lock(pmd_t *pmd,
+                                      struct vm_area_struct *vma)
+{
+        return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 #endif /* _LINUX_HUGE_MM_H */
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1031,13 +1031,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 {
         int ret = 0;

-        spin_lock(&tlb->mm->page_table_lock);
-        if (likely(pmd_trans_huge(*pmd))) {
-                if (unlikely(pmd_trans_splitting(*pmd))) {
-                        spin_unlock(&tlb->mm->page_table_lock);
-                        wait_split_huge_page(vma->anon_vma,
-                                             pmd);
-                } else {
+        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                         struct page *page;
                         pgtable_t pgtable;
                         pgtable = get_pmd_huge_pte(tlb->mm);
@@ -1054,9 +1048,6 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                         pte_free(tlb->mm, pgtable);
                         ret = 1;
                 }
-        } else
-                spin_unlock(&tlb->mm->page_table_lock);

         return ret;
 }
@@ -1066,21 +1057,15 @@ int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
         int ret = 0;

-        spin_lock(&vma->vm_mm->page_table_lock);
-        if (likely(pmd_trans_huge(*pmd))) {
-                ret = !pmd_trans_splitting(*pmd);
-                spin_unlock(&vma->vm_mm->page_table_lock);
-                if (unlikely(!ret))
-                        wait_split_huge_page(vma->anon_vma, pmd);
-                else {
+        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                         /*
                          * All logical pages in the range are present
                          * if backed by a huge page.
                          */
+                        spin_unlock(&vma->vm_mm->page_table_lock);
                         memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+                        ret = 1;
                 }
-        } else
-                spin_unlock(&vma->vm_mm->page_table_lock);

         return ret;
 }
@@ -1110,21 +1095,12 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
                 goto out;
         }

-        spin_lock(&mm->page_table_lock);
-        if (likely(pmd_trans_huge(*old_pmd))) {
-                if (pmd_trans_splitting(*old_pmd)) {
-                        spin_unlock(&mm->page_table_lock);
-                        wait_split_huge_page(vma->anon_vma, old_pmd);
-                        ret = -1;
-                } else {
+        ret = __pmd_trans_huge_lock(old_pmd, vma);
+        if (ret == 1) {
                         pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
                         VM_BUG_ON(!pmd_none(*new_pmd));
                         set_pmd_at(mm, new_addr, new_pmd, pmd);
                         spin_unlock(&mm->page_table_lock);
-                        ret = 1;
-                }
-        } else {
-                spin_unlock(&mm->page_table_lock);
-        }
+        }
 out:
         return ret;
@@ -1136,26 +1112,43 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
         struct mm_struct *mm = vma->vm_mm;
         int ret = 0;

-        spin_lock(&mm->page_table_lock);
-        if (likely(pmd_trans_huge(*pmd))) {
-                if (unlikely(pmd_trans_splitting(*pmd))) {
-                        spin_unlock(&mm->page_table_lock);
-                        wait_split_huge_page(vma->anon_vma, pmd);
-                } else {
+        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                         pmd_t entry;
                         entry = pmdp_get_and_clear(mm, addr, pmd);
                         entry = pmd_modify(entry, newprot);
                         set_pmd_at(mm, addr, pmd, entry);
                         spin_unlock(&vma->vm_mm->page_table_lock);
                         ret = 1;
                 }
-        } else
-                spin_unlock(&vma->vm_mm->page_table_lock);

         return ret;
 }

+/*
+ * Returns 1 if a given pmd maps a stable (not under splitting) thp.
+ * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
+ *
+ * Note that if it returns 1, this routine returns without unlocking page
+ * table locks. So callers must unlock them.
+ */
+int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
+{
+        spin_lock(&vma->vm_mm->page_table_lock);
+        if (likely(pmd_trans_huge(*pmd))) {
+                if (unlikely(pmd_trans_splitting(*pmd))) {
+                        spin_unlock(&vma->vm_mm->page_table_lock);
+                        wait_split_huge_page(vma->anon_vma, pmd);
+                        return -1;
+                } else {
+                        /* Thp mapped by 'pmd' is stable, so we can
+                         * handle it as it is. */
+                        return 1;
+                }
+        }
+        spin_unlock(&vma->vm_mm->page_table_lock);
+        return 0;
+}
+
 pmd_t *page_check_address_pmd(struct page *page,
                               struct mm_struct *mm,
                               unsigned long address,