Commit b95046b0 authored by Michal Hocko's avatar Michal Hocko Committed by Linus Torvalds

mm, sparse, page_ext: drop ugly N_HIGH_MEMORY branches for allocations

Commit f52407ce ("memory hotplug: alloc page from other node in
memory online") has introduced N_HIGH_MEMORY checks to only use NUMA
aware allocations when there is some memory present because the
respective node might not have any memory yet at the time and so it
could fail or even OOM.

Things have changed since then though.  Zonelists are now always
initialized before we do any allocations even for hotplug (see
959ecc48 ("mm/memory_hotplug.c: fix building of node hotplug
zonelist")).

Therefore these checks are not really needed.  In fact caller of the
allocator should never care about whether the node is populated because
that might change at any time.

Link: http://lkml.kernel.org/r/20170721143915.14161-10-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Toshi Kani <toshi.kani@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b93e0f32
@@ -222,10 +222,7 @@ static void *__meminit alloc_page_ext(size_t size, int nid)
 		return addr;
 	}
 
-	if (node_state(nid, N_HIGH_MEMORY))
-		addr = vzalloc_node(size, nid);
-	else
-		addr = vzalloc(size);
+	addr = vzalloc_node(size, nid);
 
 	return addr;
 }
......
@@ -54,14 +54,9 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 	if (slab_is_available()) {
 		struct page *page;
 
-		if (node_state(node, N_HIGH_MEMORY))
-			page = alloc_pages_node(
-				node, GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
-				get_order(size));
-		else
-			page = alloc_pages(
-				GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
-				get_order(size));
+		page = alloc_pages_node(node,
+			GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
+			get_order(size));
 		if (page)
 			return page_address(page);
 		return NULL;
......
@@ -65,14 +65,10 @@ static noinline struct mem_section __ref *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	if (slab_is_available()) {
-		if (node_state(nid, N_HIGH_MEMORY))
-			section = kzalloc_node(array_size, GFP_KERNEL, nid);
-		else
-			section = kzalloc(array_size, GFP_KERNEL);
-	} else {
+	if (slab_is_available())
+		section = kzalloc_node(array_size, GFP_KERNEL, nid);
+	else
 		section = memblock_virt_alloc_node(array_size, nid);
-	}
 
 	return section;
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment