Commit d7f946d0 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm/mempolicy: rename alloc_pages_current to alloc_pages

When CONFIG_NUMA is enabled, alloc_pages() is a wrapper around
alloc_pages_current().  This indirection is pointless; just implement
alloc_pages() directly.
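
In other words, under CONFIG_NUMA the declaration in include/linux/gfp.h previously read as a trivial static inline forwarding wrapper, and after this change alloc_pages() becomes a plain out-of-line declaration. A simplified sketch of the two states, with surrounding code elided:

/* Before this commit: alloc_pages() merely forwarded its
 * arguments to alloc_pages_current(). */
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_current(gfp_mask, order);
}

/* After this commit: alloc_pages() is declared directly and the
 * _current name disappears; its body lives in mm/mempolicy.c. */
struct page *alloc_pages(gfp_t gfp, unsigned int order);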

Link: https://lkml.kernel.org/r/20210225150642.2582252-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 84172f4b
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -546,13 +546,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 }
 
 #ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
-
-static inline struct page *
-alloc_pages(gfp_t gfp_mask, unsigned int order)
-{
-	return alloc_pages_current(gfp_mask, order);
-}
+struct page *alloc_pages(gfp_t gfp, unsigned int order);
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr,
 			int node, bool hugepage);
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2245,7 +2245,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 EXPORT_SYMBOL(alloc_pages_vma);
 
 /**
- * alloc_pages_current - Allocate pages.
+ * alloc_pages - Allocate pages.
  *
  * @gfp:
  *	%GFP_USER   user allocation,
@@ -2259,7 +2259,7 @@ EXPORT_SYMBOL(alloc_pages_vma);
  * interrupt context and apply the current process NUMA policy.
  * Returns NULL when no page can be allocated.
  */
-struct page *alloc_pages_current(gfp_t gfp, unsigned order)
+struct page *alloc_pages(gfp_t gfp, unsigned order)
 {
 	struct mempolicy *pol = &default_policy;
 	struct page *page;
@@ -2280,7 +2280,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 
 	return page;
 }
-EXPORT_SYMBOL(alloc_pages_current);
+EXPORT_SYMBOL(alloc_pages);
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
 {
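
For illustration only, a hypothetical call site (not part of this commit) is unaffected by the rename, since the exported entry point keeps the name alloc_pages():

/* Hypothetical caller: allocate a single page and free it.
 * This code looks identical before and after the rename. */
struct page *page = alloc_pages(GFP_KERNEL, 0);
if (!page)
	return -ENOMEM;
/* ... use page_address(page) ... */
__free_pages(page, 0);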