Commit 188cac58 authored by David Hildenbrand, committed by Andrew Morton

mm/hugetlb: enforce that PMD PT sharing has split PMD PT locks

Sharing page tables between processes but falling back to per-MM page
table locks cannot possibly work.

So, let's make sure that we do have split PMD locks by adding a new
Kconfig option and letting that depend on CONFIG_SPLIT_PMD_PTLOCKS.

Link: https://lkml.kernel.org/r/20240726150728.3159964-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 394290cb
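Why per-MM locks cannot work here, in code terms: the lock protecting a PMD-level page table is whatever pmd_lockptr() returns. Below is a condensed sketch of that selection, simplified from include/linux/mm.h and assuming the CONFIG_SPLIT_PMD_PTLOCKS naming introduced earlier in this series; it is illustrative, not verbatim kernel code.

/*
 * Condensed sketch of the PMD page-table lock selection that motivates
 * this change (simplified; not verbatim kernel code).
 */
#ifdef CONFIG_SPLIT_PMD_PTLOCKS
/*
 * Split locks: the spinlock lives in the PMD page table's own ptdesc,
 * so every process mapping a shared PMD table serializes on one lock.
 */
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_ptdesc(pmd));
}
#else
/*
 * Fallback: one lock per MM. Two MMs sharing the same PMD page table
 * would take different locks and never exclude each other.
 */
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
#endif

With split PMD locks the lock is embedded in the page-table page itself, so all sharers contend on the same lock; with the per-MM fallback they would not, which is why the new Kconfig option below refuses that combination.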
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -288,6 +288,10 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 	depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
 	depends on SPARSEMEM_VMEMMAP
 
+config HUGETLB_PMD_PAGE_TABLE_SHARING
+	def_bool HUGETLB_PAGE
+	depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS
+
 config ARCH_HAS_GIGANTIC_PAGE
 	bool
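With this def_bool, CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING ends up enabled exactly when CONFIG_HUGETLB_PAGE, CONFIG_ARCH_WANT_HUGE_PMD_SHARE and CONFIG_SPLIT_PMD_PTLOCKS are all set, so the code below can test the single option instead of repeating the combination (see the hugetlb_walk() hunk, which drops one of its #if conditions for exactly that reason).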
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1251,7 +1251,7 @@ static inline __init void hugetlb_cma_reserve(int order)
 }
 #endif
 
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
 static inline bool hugetlb_pmd_shared(pte_t *pte)
 {
 	return page_count(virt_to_page(pte)) > 1;
@@ -1287,8 +1287,7 @@ bool __vma_private_lock(struct vm_area_struct *vma);
 static inline pte_t *
 hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
 {
-#if defined(CONFIG_HUGETLB_PAGE) && \
-	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
 	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
 	/*
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7211,7 +7211,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
 	return 0;
 }
 
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
 static unsigned long page_table_shareable(struct vm_area_struct *svma,
 				struct vm_area_struct *vma,
 				unsigned long addr, pgoff_t idx)
@@ -7373,7 +7373,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 	return 1;
 }
 
-#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
 
 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 		      unsigned long addr, pud_t *pud)
@@ -7396,7 +7396,7 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
 {
 	return false;
 }
-#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
 
 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -7494,7 +7494,7 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
 /* See description above.  Architectures can provide their own version. */
 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
 {
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
 	if (huge_page_size(h) == PMD_SIZE)
 		return PUD_SIZE - PMD_SIZE;
 #endif