Commit 966cf44f authored by Alexander Duyck's avatar Alexander Duyck Committed by Linus Torvalds

mm: defer ZONE_DEVICE page initialization to the point where we init pgmap

The ZONE_DEVICE pages were being initialized in two locations.  One was
with the memory_hotplug lock held and another was outside of that lock.
The problem with this is that it was nearly doubling the memory
initialization time.  Instead of doing this twice, once while holding a
global lock and once without, I am opting to defer the initialization to
the one outside of the lock.  This allows us to avoid serializing the
overhead for memory init and we can instead focus on per-node init times.

One issue I encountered is that devm_memremap_pages and
hmm_devmem_pages_create were initializing only the pgmap field the same
way.  One wasn't initializing hmm_data, and the other was initializing it
to a poison value.  Since this is something that is exposed to the driver
in the case of hmm I am opting for a third option and just initializing
hmm_data to 0 since this is going to be exposed to unknown third party
drivers.

[alexander.h.duyck@linux.intel.com: fix reference count for pgmap in devm_memremap_pages]
  Link: http://lkml.kernel.org/r/20181008233404.1909.37302.stgit@localhost.localdomain
Link: http://lkml.kernel.org/r/20180925202053.3576.66039.stgit@localhost.localdomain
Signed-off-by: default avatarAlexander Duyck <alexander.h.duyck@linux.intel.com>
Reviewed-by: default avatarPavel Tatashin <pavel.tatashin@microsoft.com>
Tested-by: default avatarDan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent d483da5b
...@@ -848,6 +848,8 @@ static inline bool is_zone_device_page(const struct page *page) ...@@ -848,6 +848,8 @@ static inline bool is_zone_device_page(const struct page *page)
{ {
return page_zonenum(page) == ZONE_DEVICE; return page_zonenum(page) == ZONE_DEVICE;
} }
extern void memmap_init_zone_device(struct zone *, unsigned long,
unsigned long, struct dev_pagemap *);
#else #else
static inline bool is_zone_device_page(const struct page *page) static inline bool is_zone_device_page(const struct page *page)
{ {
......
...@@ -175,10 +175,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) ...@@ -175,10 +175,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
struct vmem_altmap *altmap = pgmap->altmap_valid ? struct vmem_altmap *altmap = pgmap->altmap_valid ?
&pgmap->altmap : NULL; &pgmap->altmap : NULL;
struct resource *res = &pgmap->res; struct resource *res = &pgmap->res;
unsigned long pfn, pgoff, order; struct dev_pagemap *conflict_pgmap;
pgprot_t pgprot = PAGE_KERNEL; pgprot_t pgprot = PAGE_KERNEL;
unsigned long pgoff, order;
int error, nid, is_ram; int error, nid, is_ram;
struct dev_pagemap *conflict_pgmap;
align_start = res->start & ~(SECTION_SIZE - 1); align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
...@@ -256,19 +256,14 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) ...@@ -256,19 +256,14 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
if (error) if (error)
goto err_add_memory; goto err_add_memory;
for_each_device_pfn(pfn, pgmap) { /*
struct page *page = pfn_to_page(pfn); * Initialization of the pages has been deferred until now in order
* to allow us to do the work while not holding the hotplug lock.
/* */
* ZONE_DEVICE pages union ->lru with a ->pgmap back memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
* pointer. It is a bug if a ZONE_DEVICE page is ever align_start >> PAGE_SHIFT,
* freed or placed on a driver-private list. Seed the align_size >> PAGE_SHIFT, pgmap);
* storage with LIST_POISON* values. percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
*/
list_del(&page->lru);
page->pgmap = pgmap;
percpu_ref_get(pgmap->ref);
}
devm_add_action(dev, devm_memremap_pages_release, pgmap); devm_add_action(dev, devm_memremap_pages_release, pgmap);
......
...@@ -1024,7 +1024,6 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem) ...@@ -1024,7 +1024,6 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
resource_size_t key, align_start, align_size, align_end; resource_size_t key, align_start, align_size, align_end;
struct device *device = devmem->device; struct device *device = devmem->device;
int ret, nid, is_ram; int ret, nid, is_ram;
unsigned long pfn;
align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1); align_start = devmem->resource->start & ~(PA_SECTION_SIZE - 1);
align_size = ALIGN(devmem->resource->start + align_size = ALIGN(devmem->resource->start +
...@@ -1109,11 +1108,14 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem) ...@@ -1109,11 +1108,14 @@ static int hmm_devmem_pages_create(struct hmm_devmem *devmem)
align_size >> PAGE_SHIFT, NULL); align_size >> PAGE_SHIFT, NULL);
mem_hotplug_done(); mem_hotplug_done();
for (pfn = devmem->pfn_first; pfn < devmem->pfn_last; pfn++) { /*
struct page *page = pfn_to_page(pfn); * Initialization of the pages has been deferred until now in order
* to allow us to do the work while not holding the hotplug lock.
*/
memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
align_start >> PAGE_SHIFT,
align_size >> PAGE_SHIFT, &devmem->pagemap);
page->pgmap = &devmem->pagemap;
}
return 0; return 0;
error_add_memory: error_add_memory:
......
...@@ -5465,12 +5465,23 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, ...@@ -5465,12 +5465,23 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
if (highest_memmap_pfn < end_pfn - 1) if (highest_memmap_pfn < end_pfn - 1)
highest_memmap_pfn = end_pfn - 1; highest_memmap_pfn = end_pfn - 1;
#ifdef CONFIG_ZONE_DEVICE
/* /*
* Honor reservation requested by the driver for this ZONE_DEVICE * Honor reservation requested by the driver for this ZONE_DEVICE
* memory * memory. We limit the total number of pages to initialize to just
* those that might contain the memory mapping. We will defer the
* ZONE_DEVICE page initialization until after we have released
* the hotplug lock.
*/ */
if (altmap && start_pfn == altmap->base_pfn) if (zone == ZONE_DEVICE) {
start_pfn += altmap->reserve; if (!altmap)
return;
if (start_pfn == altmap->base_pfn)
start_pfn += altmap->reserve;
end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
}
#endif
for (pfn = start_pfn; pfn < end_pfn; pfn++) { for (pfn = start_pfn; pfn < end_pfn; pfn++) {
/* /*
...@@ -5537,6 +5548,81 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, ...@@ -5537,6 +5548,81 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
} }
} }
#ifdef CONFIG_ZONE_DEVICE
/**
 * memmap_init_zone_device - deferred initialization of ZONE_DEVICE struct pages
 * @zone: the ZONE_DEVICE zone the pfn range belongs to
 * @start_pfn: first pfn of the hotplugged device range
 * @size: number of pages in the range
 * @pgmap: dev_pagemap describing the device memory; stored in each page
 *
 * Runs the per-page initialization that used to happen under the memory
 * hotplug lock. Callers (devm_memremap_pages, hmm_devmem_pages_create)
 * invoke this after mem_hotplug_done() so the potentially large loop does
 * not serialize other hotplug operations.
 */
void __ref memmap_init_zone_device(struct zone *zone,
unsigned long start_pfn,
unsigned long size,
struct dev_pagemap *pgmap)
{
unsigned long pfn, end_pfn = start_pfn + size;
struct pglist_data *pgdat = zone->zone_pgdat;
unsigned long zone_idx = zone_idx(zone);
unsigned long start = jiffies;
int nid = pgdat->node_id;
/* Only meaningful for a device zone with a valid pagemap. */
if (WARN_ON_ONCE(!pgmap || !is_dev_zone(zone)))
return;
/*
 * The call to memmap_init_zone should have already taken care
 * of the pages reserved for the memmap, so we can just jump to
 * the end of that region and start processing the device pages.
 */
if (pgmap->altmap_valid) {
struct vmem_altmap *altmap = &pgmap->altmap;
/* Skip pages whose memmap lives in the altmap reservation. */
start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
size = end_pfn - start_pfn;
}
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
struct page *page = pfn_to_page(pfn);
__init_single_page(page, pfn, zone_idx, nid);
/*
 * Mark page reserved as it will need to wait for onlining
 * phase for it to be fully associated with a zone.
 *
 * We can use the non-atomic __set_bit operation for setting
 * the flag as we are still initializing the pages.
 */
__SetPageReserved(page);
/*
 * ZONE_DEVICE pages union ->lru with a ->pgmap back
 * pointer and hmm_data. It is a bug if a ZONE_DEVICE
 * page is ever freed or placed on a driver-private list.
 */
page->pgmap = pgmap;
/* Zero (not poison) since hmm_data is exposed to drivers. */
page->hmm_data = 0;
/*
 * Mark the block movable so that blocks are reserved for
 * movable at startup. This will force kernel allocations
 * to reserve their blocks rather than leaking throughout
 * the address space during boot when many long-lived
 * kernel allocations are made.
 *
 * bitmap is created for zone's valid pfn range. but memmap
 * can be created for invalid pages (for alignment)
 * check here not to call set_pageblock_migratetype() against
 * pfn out of zone.
 *
 * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
 * because this is done early in sparse_add_one_section
 */
if (!(pfn & (pageblock_nr_pages - 1))) {
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
cond_resched();
}
}
pr_info("%s initialised, %lu pages in %ums\n", dev_name(pgmap->dev),
size, jiffies_to_msecs(jiffies - start));
}
#endif
static void __meminit zone_init_free_lists(struct zone *zone) static void __meminit zone_init_free_lists(struct zone *zone)
{ {
unsigned int order, t; unsigned int order, t;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment