Commit 394290cb authored by David Hildenbrand, committed by Andrew Morton

mm: turn USE_SPLIT_PTE_PTLOCKS / USE_SPLIT_PMD_PTLOCKS into Kconfig options

Patch series "mm: split PTE/PMD PT table Kconfig cleanups+clarifications".

This series is a follow-up to the fixes:
	"[PATCH v1 0/2] mm/hugetlb: fix hugetlb vs. core-mm PT locking" [1]

When working on the fixes, I wondered why 8xx is fine (-> never uses split
PT locks) and how PT locking even works properly with PMD page table
sharing (-> always requires split PMD PT locks).
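
As a reminder of why sharing depends on them, here is a simplified sketch
of how the PMD PT lock is selected (modeled on pmd_lockptr() in
include/linux/mm.h; an illustration, not the exact upstream code). With
split locks the lock lives in the page table page itself, so all MMs
sharing that table serialize on the same lock; mm->page_table_lock is
per-MM and cannot synchronize two processes sharing one PMD table:

	/* Simplified sketch: where the PMD page table lock comes from. */
	static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
	{
	#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
		/* Lock embedded in the PT page; shared by all MMs mapping it. */
		return ptlock_ptr(pmd_ptdesc(pmd));
	#else
		/* One lock per MM; cannot serialize cross-MM PMD table sharing. */
		return &mm->page_table_lock;
	#endif
	}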

Let's improve the split PT lock detection, make hugetlb properly depend on
it and make 8xx bail out if it would ever get enabled by accident.

As an alternative to patch #3 we could extend the Kconfig
SPLIT_PTE_PTLOCKS option from patch #2 -- but enforcing it closer to the
code that actually implements it feels a bit nicer for documentation
purposes, and there is no need to actually disable the option on 8xx: it
is always disabled there anyway, because 8xx is never SMP and
SPLIT_PTE_PTLOCKS depends on NR_CPUS >= 4.
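
For illustration, the 8xx "bail out" could be a build-time assertion in
the 8xx code; a hypothetical sketch, not necessarily the exact form patch
#3 takes:

	/* Hypothetical: refuse to build 8xx with split PT locks enabled. */
	#ifdef CONFIG_SPLIT_PTE_PTLOCKS
	#error "8xx does not support split PT locks"
	#endif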

Did a bunch of cross-compilations to make sure that split PTE/PMD PT locks
are still getting used where we would expect them.

[1] https://lkml.kernel.org/r/20240725183955.2268884-1-david@redhat.com


This patch (of 3):

Let's clean that up a bit and prepare for depending on
CONFIG_SPLIT_PMD_PTLOCKS in other Kconfig options.

More cleanups would be reasonable (like the arch-specific "depends on" for
CONFIG_SPLIT_PTE_PTLOCKS), but we'll leave that for another day.
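
To make the conversion concrete, here is a before/after with both idioms
taken from the diff below (the guarded declaration is the mm/memory.c
one): preprocessor checks move from the USE_SPLIT_* macros to
defined(CONFIG_...), and C expressions move to IS_ENABLED():

	/* Before: macro computed in <linux/mm_types_task.h>. */
	#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
	static struct kmem_cache *page_ptl_cachep;
	#endif

	/* After: a plain Kconfig symbol, also usable in Kconfig "depends on". */
	#if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS
	static struct kmem_cache *page_ptl_cachep;
	#endif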

Link: https://lkml.kernel.org/r/20240726150728.3159964-1-david@redhat.com
Link: https://lkml.kernel.org/r/20240726150728.3159964-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Reviewed-by: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 57979fab
@@ -61,7 +61,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
 /*
  * If we are using split PTE locks, then we need to take the page
  * lock here.  Otherwise we are using shared mm->page_table_lock
@@ -80,10 +80,10 @@ static inline void do_pte_unlock(spinlock_t *ptl)
 {
 	spin_unlock(ptl);
 }
-#else /* !USE_SPLIT_PTE_PTLOCKS */
+#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
 static inline void do_pte_lock(spinlock_t *ptl) {}
 static inline void do_pte_unlock(spinlock_t *ptl) {}
-#endif /* USE_SPLIT_PTE_PTLOCKS */
+#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
 
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	unsigned long pfn)
...
@@ -665,7 +665,7 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
 {
 	spinlock_t *ptl = NULL;
 
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
 	ptl = ptlock_ptr(page_ptdesc(page));
 	spin_lock_nest_lock(ptl, &mm->page_table_lock);
 #endif
@@ -1553,7 +1553,8 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
 
 		__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
+		if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS) &&
+		    !pinned)
 			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
 		xen_mc_issue(XEN_LAZY_MMU);
@@ -1581,7 +1582,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 	if (pinned) {
 		xen_mc_batch();
 
-		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+		if (level == PT_PTE && IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS))
 			__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
 
 		__set_pfn_prot(pfn, PAGE_KERNEL);
...
@@ -2891,7 +2891,7 @@ static inline void pagetable_free(struct ptdesc *pt)
 	__free_pages(page, compound_order(page));
 }
 
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
 #if ALLOC_SPLIT_PTLOCKS
 void __init ptlock_cache_init(void);
 bool ptlock_alloc(struct ptdesc *ptdesc);
@@ -2949,7 +2949,7 @@ static inline bool ptlock_init(struct ptdesc *ptdesc)
 	return true;
 }
 
-#else /* !USE_SPLIT_PTE_PTLOCKS */
+#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
@@ -2964,7 +2964,7 @@ static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
 static inline void ptlock_cache_init(void) {}
 static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
 static inline void ptlock_free(struct ptdesc *ptdesc) {}
-#endif /* USE_SPLIT_PTE_PTLOCKS */
+#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
 
 static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
 {
@@ -3024,7 +3024,7 @@ pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
 		NULL: pte_offset_kernel(pmd, address))
 
-#if USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
 
 static inline struct page *pmd_pgtable_page(pmd_t *pmd)
 {
...
@@ -947,7 +947,7 @@ struct mm_struct {
 #ifdef CONFIG_MMU_NOTIFIER
 		struct mmu_notifier_subscriptions *notifier_subscriptions;
 #endif
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
 		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
 #endif
 #ifdef CONFIG_NUMA_BALANCING
...
@@ -16,9 +16,6 @@
 #include <asm/tlbbatch.h>
 #endif
 
-#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
-#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
-		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
 #define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
 
 /*
...
@@ -832,7 +832,7 @@ static void check_mm(struct mm_struct *mm)
 		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
 				mm_pgtables_bytes(mm));
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
 	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
 #endif
 }
@@ -1276,7 +1276,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	RCU_INIT_POINTER(mm->exe_file, NULL);
 	mmu_notifier_subscriptions_init(mm);
 	init_tlb_flush_pending(mm);
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
 	mm->pmd_huge_pte = NULL;
 #endif
 	mm_init_uprobes_state(mm);
...
@@ -585,17 +585,21 @@ config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
 #   at the same time (e.g. copy_page_range()).
 # DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
 #
-config SPLIT_PTLOCK_CPUS
-	int
-	default "999999" if !MMU
-	default "999999" if ARM && !CPU_CACHE_VIPT
-	default "999999" if PARISC && !PA20
-	default "999999" if SPARC32
-	default "4"
+config SPLIT_PTE_PTLOCKS
+	def_bool y
+	depends on MMU
+	depends on NR_CPUS >= 4
+	depends on !ARM || CPU_CACHE_VIPT
+	depends on !PARISC || PA20
+	depends on !SPARC32
 
 config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 	bool
 
+config SPLIT_PMD_PTLOCKS
+	def_bool y
+	depends on SPLIT_PTE_PTLOCKS && ARCH_ENABLE_SPLIT_PMD_PTLOCK
+
 #
 # support for memory balloon
 config MEMORY_BALLOON
...
@@ -6571,7 +6571,7 @@ long copy_folio_from_user(struct folio *dst_folio,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
-#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS
 
 static struct kmem_cache *page_ptl_cachep;
...