Commit 69324b8f authored by Dan Williams, committed by Linus Torvalds

mm, devm_memremap_pages: add MEMORY_DEVICE_PRIVATE support

In preparation for consolidating all ZONE_DEVICE enabling via
devm_memremap_pages(), teach it how to handle the constraints of
MEMORY_DEVICE_PRIVATE ranges.

[jglisse@redhat.com: call move_pfn_range_to_zone for MEMORY_DEVICE_PRIVATE]
Link: http://lkml.kernel.org/r/154275559036.76910.12434636179931292607.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Reported-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a95c90f1
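For context (not part of the commit): once devm_memremap_pages() accepts MEMORY_DEVICE_PRIVATE, a device-private memory driver could hand its range over roughly as sketched below. This is a minimal illustration assuming the dev_pagemap layout of this kernel era (an embedded struct resource 'res' and an enum 'type'); the helper name is hypothetical, and a real caller must also set up the percpu_ref/kill teardown machinery that devm_memremap_pages() expects, which is omitted here.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/memremap.h>

	/* Hypothetical helper, for illustration only. */
	static int example_map_private_memory(struct device *dev,
					      struct resource *res)
	{
		struct dev_pagemap *pgmap;
		void *ret;

		pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
		if (!pgmap)
			return -ENOMEM;

		pgmap->res = *res;			/* physical range to back with struct pages */
		pgmap->type = MEMORY_DEVICE_PRIVATE;	/* CPU-unaddressable device memory */
		/* pgmap->ref and pgmap->kill must also be initialized in real code. */

		ret = devm_memremap_pages(dev, pgmap);
		return PTR_ERR_OR_ZERO(ret);
	}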
@@ -98,9 +98,15 @@ static void devm_memremap_pages_release(void *data)
 			- align_start;
 
 	mem_hotplug_begin();
-	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
-			&pgmap->altmap : NULL);
-	kasan_remove_zero_shadow(__va(align_start), align_size);
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		pfn = align_start >> PAGE_SHIFT;
+		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+				align_size >> PAGE_SHIFT, NULL);
+	} else {
+		arch_remove_memory(align_start, align_size,
+				pgmap->altmap_valid ? &pgmap->altmap : NULL);
+		kasan_remove_zero_shadow(__va(align_start), align_size);
+	}
 	mem_hotplug_done();
 
 	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
@@ -187,17 +193,40 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		goto err_pfn_remap;
 
 	mem_hotplug_begin();
-	error = kasan_add_zero_shadow(__va(align_start), align_size);
-	if (error) {
-		mem_hotplug_done();
-		goto err_kasan;
+
+	/*
+	 * For device private memory we call add_pages() as we only need to
+	 * allocate and initialize struct page for the device memory. More-
+	 * over the device memory is un-accessible thus we do not want to
+	 * create a linear mapping for the memory like arch_add_memory()
+	 * would do.
+	 *
+	 * For all other device memory types, which are accessible by
+	 * the CPU, we do want the linear mapping and thus use
+	 * arch_add_memory().
+	 */
+	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+		error = add_pages(nid, align_start >> PAGE_SHIFT,
+				align_size >> PAGE_SHIFT, NULL, false);
+	} else {
+		error = kasan_add_zero_shadow(__va(align_start), align_size);
+		if (error) {
+			mem_hotplug_done();
+			goto err_kasan;
+		}
+
+		error = arch_add_memory(nid, align_start, align_size, altmap,
+				false);
+	}
+
+	if (!error) {
+		struct zone *zone;
+
+		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
+		move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
+				align_size >> PAGE_SHIFT, altmap);
 	}
-
-	error = arch_add_memory(nid, align_start, align_size, altmap, false);
-	if (!error)
-		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-					align_start >> PAGE_SHIFT,
-					align_size >> PAGE_SHIFT, altmap);
 	mem_hotplug_done();
+
 	if (error)
 		goto err_add_memory;
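A note on the common move_pfn_range_to_zone() call (the [jglisse] fixup mentioned in the changelog): the rest of the mm recognizes these pages by their zone, so device-private pages that were never moved into ZONE_DEVICE would not be identified as such. Roughly, the helpers involved look like the sketch below; this is a paraphrase of include/linux/mm.h from this era, not part of the patch, and exact config guards and field names may differ.

	/* Paraphrased from include/linux/mm.h of this era; not part of this patch. */
	static inline bool is_zone_device_page(const struct page *page)
	{
		return page_zonenum(page) == ZONE_DEVICE;
	}

	static inline bool is_device_private_page(const struct page *page)
	{
		return is_zone_device_page(page) &&
			page->pgmap->type == MEMORY_DEVICE_PRIVATE;
	}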