Commit ec393a0f authored by Pavel Tatashin's avatar Pavel Tatashin Committed by Linus Torvalds

mm: return zero_resv_unavail optimization

When checking for valid pfns in zero_resv_unavail(), it is not necessary
to verify that pfns within pageblock_nr_pages ranges are valid, only the
first one needs to be checked.  This is because memory for pages are
allocated in contiguous chunks that contain pageblock_nr_pages struct
pages.

Link: http://lkml.kernel.org/r/20181002143821.5112-3-msys.mizuma@gmail.com
Signed-off-by: default avatarPavel Tatashin <pavel.tatashin@microsoft.com>
Signed-off-by: default avatarMasayoshi Mizuma <m.mizuma@jp.fujitsu.com>
Reviewed-by: default avatarMasayoshi Mizuma <m.mizuma@jp.fujitsu.com>
Acked-by: default avatarNaoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: default avatarOscar Salvador <osalvador@suse.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 907ec5fc
...@@ -6509,6 +6509,29 @@ void __init free_area_init_node(int nid, unsigned long *zones_size, ...@@ -6509,6 +6509,29 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
} }
#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP) #if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
/*
 * Zero all valid struct pages in range [spfn, epfn), return number of struct
 * pages zeroed
 */
static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
{
	unsigned long cur = spfn;
	u64 zeroed = 0;

	while (cur < epfn) {
		/*
		 * struct pages are allocated in pageblock_nr_pages-sized
		 * contiguous chunks, so validity of the block's first pfn
		 * implies validity of the whole block.
		 */
		unsigned long block = ALIGN_DOWN(cur, pageblock_nr_pages);

		if (!pfn_valid(block)) {
			/* Whole pageblock is invalid: jump past it. */
			cur = block + pageblock_nr_pages;
			continue;
		}
		mm_zero_struct_page(pfn_to_page(cur));
		zeroed++;
		cur++;
	}
	return zeroed;
}
/* /*
* Only struct pages that are backed by physical memory are zeroed and * Only struct pages that are backed by physical memory are zeroed and
* initialized by going through __init_single_page(). But, there are some * initialized by going through __init_single_page(). But, there are some
...@@ -6524,7 +6547,6 @@ void __init free_area_init_node(int nid, unsigned long *zones_size, ...@@ -6524,7 +6547,6 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
void __init zero_resv_unavail(void) void __init zero_resv_unavail(void)
{ {
phys_addr_t start, end; phys_addr_t start, end;
unsigned long pfn;
u64 i, pgcnt; u64 i, pgcnt;
phys_addr_t next = 0; phys_addr_t next = 0;
...@@ -6534,34 +6556,18 @@ void __init zero_resv_unavail(void) ...@@ -6534,34 +6556,18 @@ void __init zero_resv_unavail(void)
pgcnt = 0; pgcnt = 0;
for_each_mem_range(i, &memblock.memory, NULL, for_each_mem_range(i, &memblock.memory, NULL,
NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) { NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
if (next < start) { if (next < start)
for (pfn = PFN_DOWN(next); pfn < PFN_UP(start); pfn++) { pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start));
if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
continue;
mm_zero_struct_page(pfn_to_page(pfn));
pgcnt++;
}
}
next = end; next = end;
} }
for (pfn = PFN_DOWN(next); pfn < max_pfn; pfn++) { pgcnt += zero_pfn_range(PFN_DOWN(next), max_pfn);
if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
continue;
mm_zero_struct_page(pfn_to_page(pfn));
pgcnt++;
}
/* /*
* Struct pages that do not have backing memory. This could be because * Struct pages that do not have backing memory. This could be because
* firmware is using some of this memory, or for some other reasons. * firmware is using some of this memory, or for some other reasons.
* Once memblock is changed so such behaviour is not allowed: i.e.
* list of "reserved" memory must be a subset of list of "memory", then
* this code can be removed.
*/ */
if (pgcnt) if (pgcnt)
pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt); pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
} }
#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */ #endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment