Commit 3fccb74c authored by David Hildenbrand, committed by Linus Torvalds

mm/memory_hotplug: remove move_pfn_range()

Let's remove this indirection.  We need the zone in the caller either way,
so let's just detect it there.  Add some documentation for
move_pfn_range_to_zone() instead.

[akpm@linux-foundation.org: restore newline, per David]
Link: http://lkml.kernel.org/r/20190724142324.3686-1-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6aa9b8b2
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -714,8 +714,13 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
 	pgdat->node_start_pfn = start_pfn;
 	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) -
 					pgdat->node_start_pfn;
 }
 
+/*
+ * Associate the pfn range with the given zone, initializing the memmaps
+ * and resizing the pgdat/zone data to span the added pages. After this
+ * call, all affected pages are PG_reserved.
+ */
 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
@@ -804,20 +809,6 @@ struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
 	return default_zone_for_pfn(nid, start_pfn, nr_pages);
 }
 
-/*
- * Associates the given pfn range with the given node and the zone appropriate
- * for the given online type.
- */
-static struct zone * __meminit move_pfn_range(int online_type, int nid,
-		unsigned long start_pfn, unsigned long nr_pages)
-{
-	struct zone *zone;
-
-	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
-	move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
-	return zone;
-}
-
 int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
 {
 	unsigned long flags;
@@ -840,7 +831,8 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
 	put_device(&mem->dev);
 
 	/* associate pfn range with the zone */
-	zone = move_pfn_range(online_type, nid, pfn, nr_pages);
+	zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
+	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
 
 	arg.start_pfn = pfn;
 	arg.nr_pages = nr_pages;
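The shape of the refactoring is easy to model outside the kernel: the removed move_pfn_range() was a thin wrapper that picked a zone and then delegated to move_pfn_range_to_zone(), and after this commit callers such as online_pages() perform both steps themselves. Below is a minimal stand-alone C sketch of that calling convention; the struct zone contents, the function bodies, and the sample pfn values are simplified stand-ins, not the kernel implementation.

/*
 * Stand-alone sketch of the refactoring in this commit. All types and
 * function bodies below are simplified stand-ins, not kernel code.
 */
#include <stdio.h>

struct zone {
	const char *name;
};

static struct zone zone_normal  = { .name = "Normal"  };
static struct zone zone_movable = { .name = "Movable" };

/* stand-in for zone_for_pfn_range(): pick the zone for the pfn range */
static struct zone *zone_for_pfn_range(int online_type, int nid,
				       unsigned long start_pfn,
				       unsigned long nr_pages)
{
	(void)nid; (void)start_pfn; (void)nr_pages;
	return online_type ? &zone_movable : &zone_normal;
}

/* stand-in for move_pfn_range_to_zone(): associate the range with the zone */
static void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages)
{
	printf("pfns [%lu, %lu) -> zone %s\n",
	       start_pfn, start_pfn + nr_pages, zone->name);
}

int main(void)
{
	/*
	 * After this commit, the caller does the two steps itself instead
	 * of going through the removed move_pfn_range() wrapper:
	 */
	struct zone *zone = zone_for_pfn_range(0, 0, 0x10000, 512);

	move_pfn_range_to_zone(zone, 0x10000, 512);
	return 0;
}

The payoff is the one the commit message names: the caller needs the zone either way (online_pages() keeps using it after the call), so hiding the lookup behind a wrapper only obscured that dependency.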