Commit 6cdd0b30 authored by David Hildenbrand, committed by Linus Torvalds

mm/memory_hotplug.c: cleanup __add_pages()

Let's drop the basically unused section stuff and simplify.  The logic now
matches the logic in __remove_pages().
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Segher Boessenkool <segher@kernel.crashing.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Link: http://lkml.kernel.org/r/20200228095819.10750-3-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a11b9419
@@ -307,8 +307,9 @@ static int check_hotplug_memory_addressable(unsigned long pfn,
 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 		struct mhp_restrictions *restrictions)
 {
+	const unsigned long end_pfn = pfn + nr_pages;
+	unsigned long cur_nr_pages;
 	int err;
-	unsigned long nr, start_sec, end_sec;
 	struct vmem_altmap *altmap = restrictions->altmap;
 
 	err = check_hotplug_memory_addressable(pfn, nr_pages);
@@ -331,18 +332,13 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 	if (err)
 		return err;
 
-	start_sec = pfn_to_section_nr(pfn);
-	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-	for (nr = start_sec; nr <= end_sec; nr++) {
-		unsigned long pfns;
-
-		pfns = min(nr_pages, PAGES_PER_SECTION
-				- (pfn & ~PAGE_SECTION_MASK));
-		err = sparse_add_section(nid, pfn, pfns, altmap);
+	for (; pfn < end_pfn; pfn += cur_nr_pages) {
+		/* Select all remaining pages up to the next section boundary */
+		cur_nr_pages = min(end_pfn - pfn,
+				   SECTION_ALIGN_UP(pfn + 1) - pfn);
+		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
 		if (err)
 			break;
-		pfn += pfns;
-		nr_pages -= pfns;
 		cond_resched();
 	}
 	vmemmap_populate_print_last();
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment