Commit b77eab70 authored by Pavel Tatashin, committed by Linus Torvalds

mm/memory_hotplug: optimize probe routine

When memory is hotplugged, pages_correctly_reserved() is called to verify
that the added memory is present.  This routine traverses every
struct page and verifies that PageReserved() is set.  This is a slow
operation, especially when a large amount of memory is added.

Instead of checking every page, it is enough to check that each section
is present, has a memmap (the struct page array is allocated), and is
not already online.
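
For illustration, here is a minimal userspace model of this per-section
check.  The flag values mirror the real SECTION_MARKED_PRESENT,
SECTION_HAS_MEM_MAP and SECTION_IS_ONLINE bits from include/linux/mmzone.h,
which the kernel helpers present_section_nr(), valid_section_nr() and
online_section_nr() test; the flags array, block geometry and main() are
stand-ins invented for this sketch:

    /* Minimal userspace model of the per-section block validation.
     * Only the three flag names are real; everything else is a stub. */
    #include <stdbool.h>
    #include <stdio.h>

    #define SECTION_MARKED_PRESENT  (1UL << 0)
    #define SECTION_HAS_MEM_MAP     (1UL << 1)
    #define SECTION_IS_ONLINE       (1UL << 2)

    #define SECTIONS_PER_BLOCK      2UL     /* assumed block geometry */

    static unsigned long section_flags[8];  /* stand-in for mem_section[] */

    /* Validate a block by reading O(sections) flag words instead of
     * touching O(sections * PAGES_PER_SECTION) struct pages. */
    static bool pages_correctly_probed(unsigned long section_nr)
    {
            unsigned long end = section_nr + SECTIONS_PER_BLOCK;

            for (; section_nr < end; section_nr++) {
                    unsigned long flags = section_flags[section_nr];

                    if (!(flags & SECTION_MARKED_PRESENT)) {
                            fprintf(stderr, "section %lu not present\n", section_nr);
                            return false;
                    } else if (!(flags & SECTION_HAS_MEM_MAP)) {
                            fprintf(stderr, "section %lu no valid memmap\n", section_nr);
                            return false;
                    } else if (flags & SECTION_IS_ONLINE) {
                            fprintf(stderr, "section %lu is already online\n", section_nr);
                            return false;
                    }
            }
            return true;
    }

    int main(void)
    {
            section_flags[0] = SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;
            section_flags[1] = SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;

            printf("block 0 probed correctly: %s\n",
                   pages_correctly_probed(0) ? "yes" : "no");
            return 0;
    }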

In addition, we should not expect the probe routine to set flags in
struct page, as the struct pages have not yet been initialized.  The
initialization should be done in __init_single_page(), the same as
during boot.
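
As a rough userspace model of what __init_single_page() establishes for
each page (the refcount and mapcount starting values match what
init_page_count() and page_mapcount_reset() produce in the kernel, but
the struct layout and flags encoding below are illustrative stand-ins,
not the real struct page):

    /* Rough model of per-page initialization.  Before something like
     * this runs, the struct page content is garbage, which is why the
     * probe path must not test flags such as PG_reserved. */
    #include <stdio.h>

    struct page_model {
            unsigned long flags;   /* zone/node links live in the upper bits */
            int refcount;          /* cf. init_page_count(): starts at 1 */
            int mapcount;          /* cf. page_mapcount_reset(): starts at -1 */
    };

    static void init_single_page_model(struct page_model *page,
                                       unsigned long zone, int nid)
    {
            page->flags = (zone << 8) | (unsigned long)nid;  /* set_page_links() analogue */
            page->refcount = 1;
            page->mapcount = -1;
    }

    int main(void)
    {
            struct page_model page;

            init_single_page_model(&page, 1 /* zone */, 0 /* nid */);
            printf("flags=%#lx refcount=%d mapcount=%d\n",
                   page.flags, page.refcount, page.mapcount);
            return 0;
    }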

Link: http://lkml.kernel.org/r/20180215165920.8570-5-pasha.tatashin@oracle.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Steven Sistare <steven.sistare@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f165b378
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -187,13 +187,14 @@ int memory_isolate_notify(unsigned long val, void *v)
 }
 
 /*
- * The probe routines leave the pages reserved, just as the bootmem code does.
- * Make sure they're still that way.
+ * The probe routines leave the pages uninitialized, just as the bootmem code
+ * does. Make sure we do not access them, but instead use only information from
+ * within sections.
  */
-static bool pages_correctly_reserved(unsigned long start_pfn)
+static bool pages_correctly_probed(unsigned long start_pfn)
 {
-	int i, j;
-	struct page *page;
+	unsigned long section_nr = pfn_to_section_nr(start_pfn);
+	unsigned long section_nr_end = section_nr + sections_per_block;
 	unsigned long pfn = start_pfn;
 
 	/*
@@ -201,21 +202,24 @@ static bool pages_correctly_reserved(unsigned long start_pfn)
 	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
 	 * and assume memmap is contiguous within each section
 	 */
-	for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
+	for (; section_nr < section_nr_end; section_nr++) {
 		if (WARN_ON_ONCE(!pfn_valid(pfn)))
 			return false;
-		page = pfn_to_page(pfn);
 
-		for (j = 0; j < PAGES_PER_SECTION; j++) {
-			if (PageReserved(page + j))
-				continue;
-
-			printk(KERN_WARNING "section number %ld page number %d "
-				"not reserved, was it already online?\n",
-				pfn_to_section_nr(pfn), j);
-
+		if (!present_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) not present",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
+			return false;
+		} else if (!valid_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
+			return false;
+		} else if (online_section_nr(section_nr)) {
+			pr_warn("section %ld pfn[%lx, %lx) is already online",
+				section_nr, pfn, pfn + PAGES_PER_SECTION);
 			return false;
 		}
+		pfn += PAGES_PER_SECTION;
 	}
 
 	return true;
@@ -237,7 +241,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
 
 	switch (action) {
 	case MEM_ONLINE:
-		if (!pages_correctly_reserved(start_pfn))
+		if (!pages_correctly_probed(start_pfn))
 			return -EBUSY;
 
 		ret = online_pages(start_pfn, nr_pages, online_type);