Commit d6379159 authored by David Hildenbrand, committed by Andrew Morton

mm: remove unused savedwrite infrastructure

NUMA hinting no longer uses savedwrite, let's rip it out.

... and while at it, drop __pte_write() and __pmd_write() on ppc64.

Link: https://lkml.kernel.org/r/20221108174652.198904-7-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nadav Amit <namit@vmware.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6a56ccbc
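Background on what is being removed: on ppc64 book3s, a prot-none PTE is encoded as _PAGE_PRESENT | _PAGE_PRIVILEGED with the RWX bits cleared, and savedwrite recorded "this prot-none page used to be writable" by additionally clearing _PAGE_PRIVILEGED. Below is a minimal userspace sketch of that encoding, with illustrative bit values rather than the kernel's real masks (the real helpers, visible in the removed lines, operate on cpu_to_be64()-converted raw PTEs):

    /* build: cc -o savedwrite_sketch savedwrite_sketch.c && ./savedwrite_sketch */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pte_t;         /* stand-in for the kernel's pte_t */

    /* Illustrative bit values, not the kernel's actual definitions. */
    #define _PAGE_EXEC        0x1ULL
    #define _PAGE_WRITE       0x2ULL
    #define _PAGE_READ        0x4ULL
    #define _PAGE_PRIVILEGED  0x8ULL
    #define _PAGE_RWX         (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
    #define _PAGE_PRESENT     (1ULL << 63)

    /* prot-none: present + privileged, RWX all cleared (cf. pte_protnone()). */
    static pte_t mk_protnone(void)
    {
            return _PAGE_PRESENT | _PAGE_PRIVILEGED;
    }

    /* The removed trick: clearing the privileged bit on a prot-none PTE
     * recorded that the page used to have _PAGE_WRITE set. */
    static pte_t pte_mk_savedwrite(pte_t pte)
    {
            return pte & ~_PAGE_PRIVILEGED;
    }

    static pte_t pte_clear_savedwrite(pte_t pte)
    {
            return pte | _PAGE_PRIVILEGED;
    }

    static bool pte_savedwrite(pte_t pte)
    {
            return !(pte & (_PAGE_RWX | _PAGE_PRIVILEGED));
    }

    int main(void)
    {
            pte_t pte = mk_protnone();

            assert(!pte_savedwrite(pte));
            assert(pte_savedwrite(pte_mk_savedwrite(pte)));
            assert(!pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
            printf("savedwrite encoding round-trips as the removed helpers did\n");
            return 0;
    }

The asserts mirror the semantics of the helpers deleted in the diff below.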
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -401,35 +401,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
 #define pmdp_clear_flush_young pmdp_test_and_clear_young
 
-static inline int __pte_write(pte_t pte)
-{
-	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
-}
-
-#ifdef CONFIG_NUMA_BALANCING
-#define pte_savedwrite pte_savedwrite
-static inline bool pte_savedwrite(pte_t pte)
-{
-	/*
-	 * Saved write ptes are prot none ptes that doesn't have
-	 * privileged bit sit. We mark prot none as one which has
-	 * present and pviliged bit set and RWX cleared. To mark
-	 * protnone which used to have _PAGE_WRITE set we clear
-	 * the privileged bit.
-	 */
-	return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
-}
-#else
-#define pte_savedwrite pte_savedwrite
-static inline bool pte_savedwrite(pte_t pte)
-{
-	return false;
-}
-#endif
-
 static inline int pte_write(pte_t pte)
 {
-	return __pte_write(pte) || pte_savedwrite(pte);
+	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
 }
 
 static inline int pte_read(pte_t pte)
@@ -441,24 +415,16 @@ static inline int pte_read(pte_t pte)
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 				      pte_t *ptep)
 {
-	if (__pte_write(*ptep))
+	if (pte_write(*ptep))
 		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
-	else if (unlikely(pte_savedwrite(*ptep)))
-		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
 }
 
 #define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
 {
-	/*
-	 * We should not find protnone for hugetlb, but this complete the
-	 * interface.
-	 */
-	if (__pte_write(*ptep))
+	if (pte_write(*ptep))
 		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
-	else if (unlikely(pte_savedwrite(*ptep)))
-		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
 }
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -535,36 +501,6 @@ static inline int pte_protnone(pte_t pte)
 	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
 		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
 }
-
-#define pte_mk_savedwrite pte_mk_savedwrite
-static inline pte_t pte_mk_savedwrite(pte_t pte)
-{
-	/*
-	 * Used by Autonuma subsystem to preserve the write bit
-	 * while marking the pte PROT_NONE. Only allow this
-	 * on PROT_NONE pte
-	 */
-	VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
-		  cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
-	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
-}
-
-#define pte_clear_savedwrite pte_clear_savedwrite
-static inline pte_t pte_clear_savedwrite(pte_t pte)
-{
-	/*
-	 * Used by KSM subsystem to make a protnone pte readonly.
-	 */
-	VM_BUG_ON(!pte_protnone(pte));
-	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
-}
-#else
-#define pte_clear_savedwrite pte_clear_savedwrite
-static inline pte_t pte_clear_savedwrite(pte_t pte)
-{
-	VM_WARN_ON(1);
-	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
-}
 #endif /* CONFIG_NUMA_BALANCING */
 
 static inline bool pte_hw_valid(pte_t pte)
@@ -641,8 +577,6 @@ static inline unsigned long pte_pfn(pte_t pte)
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	if (unlikely(pte_savedwrite(pte)))
-		return pte_clear_savedwrite(pte);
 	return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
 }
 
@@ -1139,8 +1073,6 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
 #define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
 #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
-#define pmd_mk_savedwrite(pmd)	pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
-#define pmd_clear_savedwrite(pmd)	pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 #define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
@@ -1162,8 +1094,6 @@ static inline int pmd_protnone(pmd_t pmd)
 #endif /* CONFIG_NUMA_BALANCING */
 
 #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
-#define __pmd_write(pmd)	__pte_write(pmd_pte(pmd))
-#define pmd_savedwrite(pmd)	pte_savedwrite(pmd_pte(pmd))
 
 #define pmd_access_permitted pmd_access_permitted
 static inline bool pmd_access_permitted(pmd_t pmd, bool write)
@@ -1241,10 +1171,8 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
 static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 				      pmd_t *pmdp)
 {
-	if (__pmd_write((*pmdp)))
+	if (pmd_write(*pmdp))
 		pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
-	else if (unlikely(pmd_savedwrite(*pmdp)))
-		pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
 }
 
 /*
......
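With savedwrite gone, pte_write() is a plain _PAGE_WRITE test, and ptep_set_wrprotect() needs only the single pte_update() that clears _PAGE_WRITE, with no _PAGE_PRIVILEGED fixup path. Restated against the illustrative userspace model from the sketch above (hypothetical *_new names; reuses the pte_t and masks defined there):

    /* Post-removal behavior of the ppc64 write helpers, modelled on the
     * '+' lines above; illustrative, not the kernel code itself. */
    static bool pte_write_new(pte_t pte)
    {
            return pte & _PAGE_WRITE;
    }

    static pte_t pte_wrprotect_new(pte_t pte)
    {
            return pte & ~_PAGE_WRITE;
    }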
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -265,7 +265,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	}
 	pte = kvmppc_read_update_linux_pte(ptep, writing);
 	if (pte_present(pte) && !pte_protnone(pte)) {
-		if (writing && !__pte_write(pte))
+		if (writing && !pte_write(pte))
 			/* make the actual HPTE be read-only */
 			ptel = hpte_make_readonly(ptel);
 		is_ci = pte_ci(pte);
......
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -503,30 +503,6 @@ static inline pte_t pte_sw_mkyoung(pte_t pte)
 #define pte_sw_mkyoung	pte_sw_mkyoung
 #endif
 
-#ifndef pte_savedwrite
-#define pte_savedwrite pte_write
-#endif
-
-#ifndef pte_mk_savedwrite
-#define pte_mk_savedwrite pte_mkwrite
-#endif
-
-#ifndef pte_clear_savedwrite
-#define pte_clear_savedwrite pte_wrprotect
-#endif
-
-#ifndef pmd_savedwrite
-#define pmd_savedwrite pmd_write
-#endif
-
-#ifndef pmd_mk_savedwrite
-#define pmd_mk_savedwrite pmd_mkwrite
-#endif
-
-#ifndef pmd_clear_savedwrite
-#define pmd_clear_savedwrite pmd_wrprotect
-#endif
-
 #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
......
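The block deleted above was the generic fallback layer: on any architecture that did not provide its own savedwrite helpers, they were plain aliases of the ordinary write helpers, which is why they could be dropped once the last callers were gone. The block relied on the kernel's usual #ifndef override idiom, where an architecture that implements a helper also defines a same-named macro so the generic default is skipped. A standalone illustration of that idiom (hypothetical helper and bit value, not kernel code):

    #include <stdio.h>

    typedef unsigned long pte_t;

    #define pte_write(pte) (((pte) & 0x2UL) != 0)  /* illustrative write bit */

    /*
     * An arch header implementing pte_savedwrite() would also do
     * "#define pte_savedwrite pte_savedwrite", so the fallback below is
     * skipped; otherwise the name simply aliases pte_write(), exactly as
     * in the removed block.
     */
    #ifndef pte_savedwrite
    #define pte_savedwrite pte_write
    #endif

    int main(void)
    {
            pte_t pte = 0x2UL;

            printf("savedwrite fallback -> %d\n", (int)pte_savedwrite(pte));
            return 0;
    }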
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -171,18 +171,6 @@ static void __init pte_advanced_tests(struct pgtable_debug_args *args)
 	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
 }
 
-static void __init pte_savedwrite_tests(struct pgtable_debug_args *args)
-{
-	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);
-
-	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
-		return;
-
-	pr_debug("Validating PTE saved write\n");
-	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
-	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
-}
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
 {
@@ -302,22 +290,6 @@ static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
 	WARN_ON(!pmd_leaf(pmd));
 }
 
-static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args)
-{
-	pmd_t pmd;
-
-	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
-		return;
-
-	if (!has_transparent_hugepage())
-		return;
-
-	pr_debug("Validating PMD saved write\n");
-	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none);
-	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
-	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
-}
-
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
 {
@@ -451,7 +423,6 @@ static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
 static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
 static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
 static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
-static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args) { }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
@@ -1288,9 +1259,6 @@ static int __init debug_vm_pgtable(void)
 	pmd_leaf_tests(&args);
 	pud_leaf_tests(&args);
 
-	pte_savedwrite_tests(&args);
-	pmd_savedwrite_tests(&args);
-
 	pte_special_tests(&args);
 	pte_protnone_tests(&args);
 	pmd_protnone_tests(&args);
......
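The deleted debug_vm_pgtable() cases validated one round-trip property: marking savedwrite after clearing it must report savedwrite, and clearing it after marking it must not. Restated against the illustrative userspace model from the first sketch, with assert() in place of WARN_ON():

    /* Reuses pte_t, mk_protnone(), pte_mk_savedwrite(), pte_clear_savedwrite()
     * and pte_savedwrite() from the sketch near the top of this page. */
    static void savedwrite_roundtrip_test(void)
    {
            pte_t pte = mk_protnone();      /* the tests used page_prot_none */

            assert(pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
            assert(!pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
    }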