Commit cb900f41 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm, hugetlb: convert hugetlbfs to use split pmd lock

Hugetlb supports multiple page sizes. We use the split lock only for the
PMD level, but not for PUD.
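
For illustration, a minimal sketch of the caller pattern this patch converts hugetlbfs code to (hypothetical caller, not part of the patch; huge_pte_lock() and huge_pte_lockptr() are the helpers added in the diff below):

	/*
	 * Hypothetical example: instead of taking mm->page_table_lock
	 * directly, callers ask for the lock that covers this hugetlb PTE.
	 * For PMD-sized pages that is the split per-page-table lock via
	 * pmd_lockptr(); larger (PUD-sized) pages still fall back to
	 * mm->page_table_lock.
	 */
	static void example_touch_huge_pte(struct vm_area_struct *vma, pte_t *ptep)
	{
		struct hstate *h = hstate_vma(vma);
		spinlock_t *ptl;

		ptl = huge_pte_lock(h, vma->vm_mm, ptep);	/* pick lock and take it */
		/* ... inspect or modify the hugetlb PTE under ptl ... */
		spin_unlock(ptl);
	}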

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Alex Thorlton <athorlton@sgi.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "Eric W . Biederman" <ebiederm@xmission.com>
Cc: "Paul E . McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Jones <davej@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Sedat Dilek <sedat.dilek@gmail.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c389a250
fs/proc/meminfo.c

 #include <linux/fs.h>
-#include <linux/hugetlb.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/hugetlb.h>
 #include <linux/mman.h>
 #include <linux/mmzone.h>
 #include <linux/proc_fs.h>
include/linux/hugetlb.h

@@ -392,6 +392,15 @@ static inline int hugepage_migration_support(struct hstate *h)
 	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
 }
 
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+					struct mm_struct *mm, pte_t *pte)
+{
+	if (huge_page_size(h) == PMD_SIZE)
+		return pmd_lockptr(mm, (pmd_t *) pte);
+	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
+	return &mm->page_table_lock;
+}
+
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page_node(h, nid) NULL
@@ -401,6 +410,7 @@ struct hstate {};
 #define hstate_sizelog(s) NULL
 #define hstate_vma(v) NULL
 #define hstate_inode(i) NULL
+#define page_hstate(page) NULL
 #define huge_page_size(h) PAGE_SIZE
 #define huge_page_mask(h) PAGE_MASK
 #define vma_kernel_pagesize(v) PAGE_SIZE
@@ -421,6 +431,22 @@ static inline pgoff_t basepage_index(struct page *page)
 #define dissolve_free_huge_pages(s, e) do {} while (0)
 #define pmd_huge_support() 0
 #define hugepage_migration_support(h) 0
+
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+					struct mm_struct *mm, pte_t *pte)
+{
+	return &mm->page_table_lock;
+}
 #endif /* CONFIG_HUGETLB_PAGE */
 
+static inline spinlock_t *huge_pte_lock(struct hstate *h,
+					struct mm_struct *mm, pte_t *pte)
+{
+	spinlock_t *ptl;
+
+	ptl = huge_pte_lockptr(h, mm, pte);
+	spin_lock(ptl);
+	return ptl;
+}
+
 #endif /* _LINUX_HUGETLB_H */
include/linux/swapops.h

@@ -139,7 +139,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					unsigned long address);
-extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma,
+		struct mm_struct *mm, pte_t *pte);
 #else
 
 #define make_migration_entry(page, write) swp_entry(0, 0)
@@ -151,8 +152,8 @@ static inline int is_migration_entry(swp_entry_t swp)
 static inline void make_migration_entry_read(swp_entry_t *entryp) { }
 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 					unsigned long address) { }
-static inline void migration_entry_wait_huge(struct mm_struct *mm,
-					pte_t *pte) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
+		struct mm_struct *mm, pte_t *pte) { }
 static inline int is_write_migration_entry(swp_entry_t entry)
 {
 	return 0;
mm/hugetlb.c: this diff is collapsed.
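
The collapsed mm/hugetlb.c diff applies the same mechanical conversion throughout that file; a before/after sketch of the pattern (simplified illustration, not a verbatim hunk from the patch):

	/* before: one per-mm lock guards every hugetlb page table */
	spin_lock(&mm->page_table_lock);
	entry = huge_ptep_get(ptep);
	/* ... */
	spin_unlock(&mm->page_table_lock);

	/* after: the lock is chosen per huge page size via the new helper */
	ptl = huge_pte_lock(h, mm, ptep);
	entry = huge_ptep_get(ptep);
	/* ... */
	spin_unlock(ptl);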
mm/mempolicy.c

@@ -525,8 +525,9 @@ static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
 #ifdef CONFIG_HUGETLB_PAGE
 	int nid;
 	struct page *page;
+	spinlock_t *ptl;
 
-	spin_lock(&vma->vm_mm->page_table_lock);
+	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
 	page = pte_page(huge_ptep_get((pte_t *)pmd));
 	nid = page_to_nid(page);
 	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
@@ -536,7 +537,7 @@ static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
 		isolate_huge_page(page, private);
 unlock:
-	spin_unlock(&vma->vm_mm->page_table_lock);
+	spin_unlock(ptl);
 #else
 	BUG();
 #endif
mm/migrate.c

@@ -130,7 +130,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 		ptep = huge_pte_offset(mm, addr);
 		if (!ptep)
 			goto out;
-		ptl = &mm->page_table_lock;
+		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
 	} else {
 		pmd = mm_find_pmd(mm, addr);
 		if (!pmd)
@@ -249,9 +249,10 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 	__migration_entry_wait(mm, ptep, ptl);
 }
 
-void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+void migration_entry_wait_huge(struct vm_area_struct *vma,
+		struct mm_struct *mm, pte_t *pte)
 {
-	spinlock_t *ptl = &(mm)->page_table_lock;
+	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
 	__migration_entry_wait(mm, pte, ptl);
 }
mm/rmap.c

@@ -601,7 +601,7 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 
 	if (unlikely(PageHuge(page))) {
 		pte = huge_pte_offset(mm, address);
-		ptl = &mm->page_table_lock;
+		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
 		goto check;
 	}