Commit 7c243c71 authored by Russ Anderson, committed by Linus Torvalds

mm: speedup in __early_pfn_to_nid

When booting on a large memory system, the kernel spends considerable
time in memmap_init_zone() setting up memory zones.  Analysis shows
significant time spent in __early_pfn_to_nid().

The routine memmap_init_zone() checks each PFN to verify the nid is
valid.  __early_pfn_to_nid() sequentially scans the list of pfn ranges
to find the right range and returns the nid.  This does not scale well.
On a 4 TB (single rack) system there are 308 memory ranges to scan.  The
higher the PFN, the more time is spent sequentially scanning through the
memory ranges.
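
To make the cost concrete, initializing N pfns across R ranges this way
costs O(N * R) range comparisons, since every lookup restarts the scan
from the beginning.  A minimal userspace sketch of the pattern (the
struct and function names are illustrative, not the kernel's actual
types):

    struct pfn_range {
            unsigned long start_pfn;        /* first pfn in the range */
            unsigned long end_pfn;          /* one past the last pfn */
            int nid;                        /* node owning the range */
    };

    /* Linear scan: every call walks the table from the start. */
    static int early_pfn_to_nid_slow(const struct pfn_range *tbl,
                                     int n, unsigned long pfn)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (tbl[i].start_pfn <= pfn && pfn < tbl[i].end_pfn)
                            return tbl[i].nid;
            return -1;      /* pfn falls in a memory hole */
    }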

Since memmap_init_zone() increments pfn, it will almost always be
looking for the same range as the previous pfn, so check that range
first.  If it is in the same range, return that nid.  If not, scan the
list as before.
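
The patch below does this with a one-entry cache held in static
variables and consulted before falling back to the full scan.
Continuing the illustrative sketch above (not the kernel's actual
code; the statics are only safe because early boot is single-threaded):

    static int early_pfn_to_nid_cached(const struct pfn_range *tbl,
                                       int n, unsigned long pfn)
    {
            /* SMP-unsafe cache; fine while boot is single-threaded. */
            static unsigned long last_start_pfn, last_end_pfn;
            static int last_nid;
            int i;

            /* Fast path: consecutive pfns usually hit the same range. */
            if (last_start_pfn <= pfn && pfn < last_end_pfn)
                    return last_nid;

            for (i = 0; i < n; i++)
                    if (tbl[i].start_pfn <= pfn && pfn < tbl[i].end_pfn) {
                            last_start_pfn = tbl[i].start_pfn;
                            last_end_pfn = tbl[i].end_pfn;
                            last_nid = tbl[i].nid;
                            return tbl[i].nid;
                    }
            return -1;      /* pfn falls in a memory hole */
    }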

A 4 TB (single rack) UV1 system takes 512 seconds to get through the
zone code.  This performance optimization reduces the time by 189
seconds, a 36% improvement.

A 2 TB (single rack) UV2 system goes from 212.7 seconds to 99.8 seconds,
a 112.9 second (53%) reduction.

[akpm@linux-foundation.org: make the statics __meminitdata]
[akpm@linux-foundation.org: fix comment formatting]
[akpm@linux-foundation.org: fix ia64, per yinghai]
[akpm@linux-foundation.org: add missing semicolon, per Tony]
Signed-off-by: Russ Anderson <rja@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Tested-by: "Luck, Tony" <tony.luck@intel.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Lin Feng <linfeng@cn.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fed5b64a
arch/ia64/mm/numa.c
...
@@ -61,14 +61,27 @@ paddr_to_nid(unsigned long paddr)
 int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
+	/*
+	 * NOTE: The following SMP-unsafe globals are only used early in boot
+	 * when the kernel is running single-threaded.
+	 */
+	static int __meminitdata last_ssec, last_esec;
+	static int __meminitdata last_nid;
+
+	if (section >= last_ssec && section < last_esec)
+		return last_nid;
 
 	for (i = 0; i < num_node_memblks; i++) {
 		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
 		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
 			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
-		if (section >= ssec && section < esec)
+		if (section >= ssec && section < esec) {
+			last_ssec = ssec;
+			last_esec = esec;
+			last_nid = node_memblk[i].nid;
 			return node_memblk[i].nid;
+		}
 	}
 
 	return -1;
 }
...
mm/page_alloc.c
...
@@ -4187,10 +4187,23 @@ int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	unsigned long start_pfn, end_pfn;
 	int i, nid;
+	/*
+	 * NOTE: The following SMP-unsafe globals are only used early in boot
+	 * when the kernel is running single-threaded.
+	 */
+	static unsigned long __meminitdata last_start_pfn, last_end_pfn;
+	static int __meminitdata last_nid;
+
+	if (last_start_pfn <= pfn && pfn < last_end_pfn)
+		return last_nid;
 
 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
-		if (start_pfn <= pfn && pfn < end_pfn)
+		if (start_pfn <= pfn && pfn < end_pfn) {
+			last_start_pfn = start_pfn;
+			last_end_pfn = end_pfn;
+			last_nid = nid;
 			return nid;
+		}
 	/* This is a memory hole */
 	return -1;
 }
...