Commit 8e2cdbcb authored by Johannes Weiner, committed by Linus Torvalds

x86-64: fall back to regular page vmemmap on allocation failure

Memory hotplug can happen on a machine that is under load, short on
memory, and fragmented, so huge page allocations for the vmemmap are
not guaranteed to succeed.

Try to fall back to regular pages before failing the hotplug event
completely.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e8216da5
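
The fix follows a straightforward pattern: attempt the PMD-sized allocation first, and degrade the backing granularity only when it fails, rather than aborting the hotplug. Below is a minimal user-space sketch of that pattern, assuming nothing beyond the C11 standard library; populate_range, HUGE_SIZE, and PAGE_SIZE are hypothetical stand-ins for the kernel's vmemmap_alloc_block_buf()/vmemmap_populate_basepages() path shown in the diff that follows.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL			/* base page, as on x86-64 */
#define HUGE_SIZE	(512 * PAGE_SIZE)	/* 2 MiB, one PMD's worth */

/* Prefer one huge block; degrade to base-page granularity on failure. */
static void *populate_range(size_t len)
{
	void *p = aligned_alloc(HUGE_SIZE, len);

	if (p)
		return p;		/* fast path: huge backing */

	fprintf(stderr, "falling back to regular page backing\n");
	return aligned_alloc(PAGE_SIZE, len);
}

int main(void)
{
	void *p = populate_range(HUGE_SIZE);

	if (!p)
		return 1;	/* both granularities failed: genuine OOM */
	free(p);
	return 0;
}

As in the patch, the warning fires on the first fallback, and failure is reported only once both granularities are exhausted.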
@@ -1303,12 +1303,11 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
 
 		pmd = pmd_offset(pud, addr);
 		if (pmd_none(*pmd)) {
-			pte_t entry;
 			void *p;
 
 			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
-			if (!p)
-				return -ENOMEM;
+			if (p) {
+				pte_t entry;
 
-			entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
-					PAGE_KERNEL_LARGE);
+				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
+						PAGE_KERNEL_LARGE);
@@ -1326,8 +1325,15 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
 
 			addr_end = addr + PMD_SIZE;
 			p_end = p + PMD_SIZE;
-		} else
-			vmemmap_verify((pte_t *)pmd, node, addr, next);
+				continue;
+			}
+		} else if (pmd_large(*pmd)) {
+			vmemmap_verify((pte_t *)pmd, node, addr, next);
+			continue;
+		}
+		pr_warn_once("vmemmap: falling back to regular page backing\n");
+		if (vmemmap_populate_basepages(addr, next, node))
+			return -ENOMEM;
 	}
 	return 0;
 }
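
Applied, the PMD-population loop reads as follows (reconstructed from the hunks above; the lines hidden between the two hunks are summarized as a comment):

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				/* ... install the PMD mapping, track the contiguous range ... */
				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			}
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		/* No huge page available: back this range with regular pages. */
		pr_warn_once("vmemmap: falling back to regular page backing\n");
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;

Only when both the huge-page and the base-page population fail does the hotplug event still return -ENOMEM.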