Commit 4cf58924 authored by Joel Fernandes (Google), committed by Linus Torvalds

mm: treewide: remove unused address argument from pte_alloc functions

Patch series "Add support for fast mremap".

This series speeds up the mremap(2) syscall by copying page tables at
the PMD level even for non-THP systems.  There is a concern that the
extra 'address' argument that mremap passes to pte_alloc may do
something subtle and architecture-specific in the future that could
break the scheme.  We also find that there is no point in passing the
'address' to pte_alloc since it is unused.  This patch therefore removes
the argument tree-wide, which also yields a nice negative diff, and
verifies along the way that the enabled architectures do not do anything
with the 'address' argument that would go unnoticed by the optimization.
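
For illustration, a minimal sketch (not copied from any one architecture;
the GFP flags and the caller shown are placeholders) of what the change
looks like for a typical pte_alloc_one_kernel() definition and call site:

/* Before: 'address' is accepted but never used. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
/* ... and a typical caller: */
pte = pte_alloc_one_kernel(&init_mm, addr);

/* After: the argument is dropped from the definition and every caller. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
pte = pte_alloc_one_kernel(&init_mm);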

Build and boot tested on x86-64.  Build tested on arm64.  The config
enablement patch for arm64 will be posted in the future after more
testing.

The changes were obtained by applying the following Coccinelle script
(thanks to Julia for answering all Coccinelle questions!).
The following fix-ups were done manually:
* Removal of the address argument from pte_fragment_alloc
* Removal of the pte_alloc_one_fast definitions from m68k and microblaze.

// Options: --include-headers --no-includes
// Note: I split the 'identifier fn' line, so if you are manually
// running it, please unsplit it so it runs for you.

virtual patch

@pte_alloc_func_def depends on patch exists@
identifier E2;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
type T2;
@@

 fn(...
- , T2 E2
 )
 { ... }

@pte_alloc_func_proto_noarg depends on patch exists@
type T1, T2, T3, T4;
identifier fn =~ "^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

(
- T3 fn(T1, T2);
+ T3 fn(T1);
|
- T3 fn(T1, T2, T4);
+ T3 fn(T1, T2);
)

@pte_alloc_func_proto depends on patch exists@
identifier E1, E2, E4;
type T1, T2, T3, T4;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

(
- T3 fn(T1 E1, T2 E2);
+ T3 fn(T1 E1);
|
- T3 fn(T1 E1, T2 E2, T4 E4);
+ T3 fn(T1 E1, T2 E2);
)

@pte_alloc_func_call depends on patch exists@
expression E2;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

 fn(...
-,  E2
 )

@pte_alloc_macro depends on patch exists@
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
identifier a, b, c;
expression e;
position p;
@@

(
- #define fn(a, b, c) e
+ #define fn(a, b) e
|
- #define fn(a, b) e
+ #define fn(a) e
)
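
For reference, a script like this is normally applied over the tree with
spatch; the invocation below is only illustrative (the .cocci file name is
made up), using the options noted in the header and enabling the 'patch'
virtual rule:

spatch --sp-file pte_alloc.cocci -D patch --include-headers --no-includes \
	--in-place --dir .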

Link: http://lkml.kernel.org/r/20181108181201.88826-2-joelaf@google.com
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Suggested-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Julia Lawall <Julia.Lawall@lip6.fr>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ff1522bb
......@@ -52,7 +52,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
}
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
......@@ -65,9 +65,9 @@ pte_free_kernel(struct mm_struct *mm, pte_t *pte)
}
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
pte_alloc_one(struct mm_struct *mm)
{
pte_t *pte = pte_alloc_one_kernel(mm, address);
pte_t *pte = pte_alloc_one_kernel(mm);
struct page *page;
if (!pte)
......
......@@ -90,8 +90,7 @@ static inline int __get_order_pte(void)
return get_order(PTRS_PER_PTE * sizeof(pte_t));
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
......@@ -102,7 +101,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
}
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
pte_alloc_one(struct mm_struct *mm)
{
pgtable_t pte_pg;
struct page *page;
......
......@@ -81,7 +81,7 @@ static inline void clean_pte_table(pte_t *pte)
* +------------+
*/
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
......@@ -93,7 +93,7 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
}
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
......
......@@ -91,13 +91,13 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_page(PGALLOC_GFP);
}
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
......
......@@ -59,8 +59,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long) pgd);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
......@@ -75,8 +74,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
}
/* _kernel variant gets to use a different allocator */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
gfp_t flags = GFP_KERNEL | __GFP_ZERO;
return (pte_t *) __get_free_page(flags);
......
......@@ -83,7 +83,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
pmd_val(*pmd_entry) = __pa(pte);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
void *pg;
......@@ -99,8 +99,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
return page;
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long addr)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
......
......@@ -12,8 +12,7 @@ extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
extern const char bad_pmd_string[];
extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
unsigned long page = __get_free_page(GFP_DMA);
......@@ -32,8 +31,6 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
(unsigned long)(page_address(page)))
......@@ -50,8 +47,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_DMA, 0);
pte_t *pte;
......
......@@ -8,7 +8,7 @@
extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
......@@ -28,7 +28,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
free_page((unsigned long) pte);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
pte_t *pte;
......
......@@ -35,8 +35,7 @@ do { \
tlb_remove_page((tlb), pte); \
} while (0)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
unsigned long page = __get_free_page(GFP_KERNEL);
......@@ -47,8 +46,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return (pte_t *) (page);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page = alloc_pages(GFP_KERNEL, 0);
......
......@@ -108,10 +108,9 @@ static inline void free_pgd_slow(pgd_t *pgd)
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *ptepage;
......@@ -132,20 +131,6 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return ptepage;
}
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
unsigned long address)
{
unsigned long *ret;
ret = pte_quicklist;
if (ret != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
}
return (pte_t *)ret;
}
static inline void pte_free_fast(pte_t *pte)
{
*(unsigned long **)pte = pte_quicklist;
......
......@@ -235,8 +235,7 @@ unsigned long iopa(unsigned long addr)
return pa;
}
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
if (mem_init_done) {
......
......@@ -50,14 +50,12 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_pages((unsigned long)pgd, PGD_ORDER);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PTE_ORDER);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
......
......@@ -22,8 +22,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
#define check_pgt_cache() do { } while (0)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long addr)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
......@@ -34,7 +33,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return pte;
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
pgtable_t pte;
......
......@@ -37,8 +37,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_pages((unsigned long)pgd, PGD_ORDER);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
......@@ -47,8 +46,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return pte;
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
......
......@@ -70,10 +70,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long)pgd);
}
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
pte = alloc_pages(GFP_KERNEL, 0);
......
......@@ -118,8 +118,7 @@ EXPORT_SYMBOL(iounmap);
* the memblock infrastructure.
*/
pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
......
......@@ -122,7 +122,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
pte_alloc_one(struct mm_struct *mm)
{
struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!page)
......@@ -135,7 +135,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
}
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
return pte;
......
......@@ -61,10 +61,10 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
extern pgtable_t pte_alloc_one(struct mm_struct *mm);
void pte_frag_destroy(void *pte_frag);
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
void pte_fragment_free(unsigned long *table, int kernel);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
......
......@@ -39,7 +39,7 @@ extern struct vmemmap_backing *vmemmap_list;
extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) pgtable_cache[shift]
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern pte_t *pte_fragment_alloc(struct mm_struct *, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
extern void pmd_fragment_free(unsigned long *);
......@@ -190,16 +190,14 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
return (pgtable_t)pmd_page_vaddr(pmd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)pte_fragment_alloc(mm, address, 1);
return (pte_t *)pte_fragment_alloc(mm, 1);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
return (pgtable_t)pte_fragment_alloc(mm, address, 0);
return (pgtable_t)pte_fragment_alloc(mm, 0);
}
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
......
......@@ -79,10 +79,10 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
#endif
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
extern pgtable_t pte_alloc_one(struct mm_struct *mm);
void pte_frag_destroy(void *pte_frag);
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
void pte_fragment_free(unsigned long *table, int kernel);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
......
......@@ -93,14 +93,12 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
pte_t *pte;
......
......@@ -95,7 +95,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
return (pte_t *)ret;
}
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
{
pte_t *pte;
......
......@@ -43,17 +43,17 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
extern char etext[], _stext[], _sinittext[], _einittext[];
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
if (!slab_is_available())
return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
return (pte_t *)pte_fragment_alloc(mm, address, 1);
return (pte_t *)pte_fragment_alloc(mm, 1);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
return (pgtable_t)pte_fragment_alloc(mm, address, 0);
return (pgtable_t)pte_fragment_alloc(mm, 0);
}
void __iomem *
......
......@@ -82,15 +82,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#endif /* __PAGETABLE_PMD_FOLDED */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_page(
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline struct page *pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
......
......@@ -139,8 +139,8 @@ static inline void pmd_populate(struct mm_struct *mm,
/*
* page table entry allocation/free routines.
*/
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
......
......@@ -32,14 +32,12 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
/*
* Allocate and free page tables.
*/
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page;
void *pg;
......
......@@ -58,10 +58,9 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
void pmd_set(pmd_t *pmdp, pte_t *ptep);
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
pgtable_t pte_alloc_one(struct mm_struct *mm);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}
......
......@@ -60,10 +60,8 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
kmem_cache_free(pgtable_cache, pmd);
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address);
pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address);
pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
pgtable_t pte_alloc_one(struct mm_struct *mm);
void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
void pte_free(struct mm_struct *mm, pgtable_t ptepage);
......
......@@ -2925,8 +2925,7 @@ void __flush_tlb_all(void)
: : "r" (pstate));
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
pte_t *pte = NULL;
......@@ -2937,8 +2936,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return pte;
}
pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
......
......@@ -364,12 +364,12 @@ pgd_t *get_pgd_fast(void)
* Alignments up to the page size are the same for physical and virtual
* addresses of the nocache area.
*/
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
unsigned long pte;
struct page *page;
if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
return NULL;
page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
if (!pgtable_page_ctor(page)) {
......
......@@ -25,8 +25,8 @@
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *);
extern pgtable_t pte_alloc_one(struct mm_struct *);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
......
......@@ -199,7 +199,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long) pgd);
}
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
......@@ -207,7 +207,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
return pte;
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
......
......@@ -34,7 +34,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
* Allocate one PTE table.
*/
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
......@@ -46,7 +46,7 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
}
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
......
......@@ -47,8 +47,8 @@ extern gfp_t __userpte_alloc_gfp;
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *);
extern pgtable_t pte_alloc_one(struct mm_struct *);
/* Should really implement gc for free page table pages. This could be
done with a reference count in struct page. */
......
......@@ -23,12 +23,12 @@ EXPORT_SYMBOL(physical_mask);
gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
......
......@@ -38,8 +38,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
free_page((unsigned long)pgd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *ptep;
int i;
......@@ -52,13 +51,12 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return ptep;
}
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long addr)
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
pte_t *pte;
struct page *page;
pte = pte_alloc_one_kernel(mm, addr);
pte = pte_alloc_one_kernel(mm);
if (!pte)
return NULL;
page = virt_to_page(pte);
......
......@@ -1873,8 +1873,8 @@ static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);
/*
* The following ifdef needed to get the 4level-fixup.h header to work.
......@@ -2005,18 +2005,17 @@ static inline void pgtable_page_dtor(struct page *page)
pte_unmap(pte); \
} while (0)
#define pte_alloc(mm, pmd, address) \
(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
#define pte_alloc_map(mm, pmd, address) \
(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
(pte_alloc(mm, pmd, address) ? \
(pte_alloc(mm, pmd) ? \
NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
#define pte_alloc_kernel(pmd, address) \
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
#if USE_SPLIT_PMD_PTLOCKS
......
......@@ -568,7 +568,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
return VM_FAULT_FALLBACK;
}
pgtable = pte_alloc_one(vma->vm_mm, haddr);
pgtable = pte_alloc_one(vma->vm_mm);
if (unlikely(!pgtable)) {
ret = VM_FAULT_OOM;
goto release;
......@@ -702,7 +702,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
struct page *zero_page;
bool set;
vm_fault_t ret;
pgtable = pte_alloc_one(vma->vm_mm, haddr);
pgtable = pte_alloc_one(vma->vm_mm);
if (unlikely(!pgtable))
return VM_FAULT_OOM;
zero_page = mm_get_huge_zero_page(vma->vm_mm);
......@@ -791,7 +791,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
return VM_FAULT_SIGBUS;
if (arch_needs_pgtable_deposit()) {
pgtable = pte_alloc_one(vma->vm_mm, addr);
pgtable = pte_alloc_one(vma->vm_mm);
if (!pgtable)
return VM_FAULT_OOM;
}
......@@ -927,7 +927,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (!vma_is_anonymous(vma))
return 0;
pgtable = pte_alloc_one(dst_mm, addr);
pgtable = pte_alloc_one(dst_mm);
if (unlikely(!pgtable))
goto out;
......
......@@ -123,7 +123,7 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
pte_t *p;
if (slab_is_available())
p = pte_alloc_one_kernel(&init_mm, addr);
p = pte_alloc_one_kernel(&init_mm);
else
p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
if (!p)
......
......@@ -400,10 +400,10 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
}
}
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
spinlock_t *ptl;
pgtable_t new = pte_alloc_one(mm, address);
pgtable_t new = pte_alloc_one(mm);
if (!new)
return -ENOMEM;
......@@ -434,9 +434,9 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
return 0;
}
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
int __pte_alloc_kernel(pmd_t *pmd)
{
pte_t *new = pte_alloc_one_kernel(&init_mm, address);
pte_t *new = pte_alloc_one_kernel(&init_mm);
if (!new)
return -ENOMEM;
......@@ -2896,7 +2896,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
*
* Here we only have down_read(mmap_sem).
*/
if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
if (pte_alloc(vma->vm_mm, vmf->pmd))
return VM_FAULT_OOM;
/* See the comment in pte_alloc_one_map() */
......@@ -3043,7 +3043,7 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
spin_unlock(vmf->ptl);
vmf->prealloc_pte = NULL;
} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
return VM_FAULT_OOM;
}
map_pte:
......@@ -3122,7 +3122,7 @@ static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
* related to pte entry. Use the preallocated table for that.
*/
if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
if (!vmf->prealloc_pte)
return VM_FAULT_OOM;
smp_wmb(); /* See comment in __pte_alloc() */
......@@ -3360,8 +3360,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
start_pgoff + nr_pages - 1);
if (pmd_none(*vmf->pmd)) {
vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
vmf->address);
vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
if (!vmf->prealloc_pte)
goto out;
smp_wmb(); /* See comment in __pte_alloc() */
......
......@@ -2636,7 +2636,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
*
* Here we only have down_read(mmap_sem).
*/
if (pte_alloc(mm, pmdp, addr))
if (pte_alloc(mm, pmdp))
goto abort;
/* See the comment in pte_alloc_one_map() */
......
......@@ -236,7 +236,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (pmd_trans_unstable(old_pmd))
continue;
}
if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
if (pte_alloc(new_vma->vm_mm, new_pmd))
break;
next = (new_addr + PMD_SIZE) & PMD_MASK;
if (extent > next - new_addr)
......
......@@ -550,7 +550,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
break;
}
if (unlikely(pmd_none(dst_pmdval)) &&
unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
unlikely(__pte_alloc(dst_mm, dst_pmd))) {
err = -ENOMEM;
break;
}
......
......@@ -647,7 +647,7 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
BUG_ON(pmd_sect(*pmd));
if (pmd_none(*pmd)) {
pte = pte_alloc_one_kernel(NULL, addr);
pte = pte_alloc_one_kernel(NULL);
if (!pte) {
kvm_err("Cannot allocate Hyp pte\n");
return -ENOMEM;
......