Commit 5557c766 authored by Michal Hocko's avatar Michal Hocko Committed by Linus Torvalds

mm, memory_hotplug: cleanup memory offline path

check_pages_isolated_cb currently accounts the whole pfn range as being
offlined if test_pages_isolated succeeds on the range.  This is based on
the assumption that all pages in the range are freed which is currently
the case in most cases but it won't be with later changes, as pages marked
as vmemmap won't be isolated.

Move the offlined pages counting to offline_isolated_pages_cb and rely on
__offline_isolated_pages to return the correct value.
check_pages_isolated_cb will still do its primary job and check the pfn
range.

While we are at it remove check_pages_isolated and offline_isolated_pages
and use directly walk_system_ram_range as we do in online_pages.

Link: http://lkml.kernel.org/r/20190408082633.2864-2-osalvador@suse.de
Reviewed-by: default avatarDavid Hildenbrand <david@redhat.com>
Signed-off-by: default avatarMichal Hocko <mhocko@suse.com>
Signed-off-by: default avatarOscar Salvador <osalvador@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 0e56acae
...@@ -87,7 +87,8 @@ extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); ...@@ -87,7 +87,8 @@ extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
extern int online_pages(unsigned long, unsigned long, int); extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
unsigned long *valid_start, unsigned long *valid_end); unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long); extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
unsigned long end_pfn);
typedef void (*online_page_callback_t)(struct page *page, unsigned int order); typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
......
...@@ -1449,15 +1449,10 @@ static int ...@@ -1449,15 +1449,10 @@ static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
void *data) void *data)
{ {
__offline_isolated_pages(start, start + nr_pages); unsigned long *offlined_pages = (unsigned long *)data;
return 0;
}
static void *offlined_pages += __offline_isolated_pages(start, start + nr_pages);
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) return 0;
{
walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
offline_isolated_pages_cb);
} }
/* /*
...@@ -1467,26 +1462,7 @@ static int ...@@ -1467,26 +1462,7 @@ static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages, check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
void *data) void *data)
{ {
int ret; return test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
long offlined = *(long *)data;
ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
offlined = nr_pages;
if (!ret)
*(long *)data += offlined;
return ret;
}
static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
long offlined = 0;
int ret;
ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
check_pages_isolated_cb);
if (ret < 0)
offlined = (long)ret;
return offlined;
} }
static int __init cmdline_parse_movable_node(char *p) static int __init cmdline_parse_movable_node(char *p)
...@@ -1571,7 +1547,7 @@ static int __ref __offline_pages(unsigned long start_pfn, ...@@ -1571,7 +1547,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
unsigned long end_pfn) unsigned long end_pfn)
{ {
unsigned long pfn, nr_pages; unsigned long pfn, nr_pages;
long offlined_pages; unsigned long offlined_pages = 0;
int ret, node, nr_isolate_pageblock; int ret, node, nr_isolate_pageblock;
unsigned long flags; unsigned long flags;
unsigned long valid_start, valid_end; unsigned long valid_start, valid_end;
...@@ -1647,14 +1623,15 @@ static int __ref __offline_pages(unsigned long start_pfn, ...@@ -1647,14 +1623,15 @@ static int __ref __offline_pages(unsigned long start_pfn,
goto failed_removal_isolated; goto failed_removal_isolated;
} }
/* check again */ /* check again */
offlined_pages = check_pages_isolated(start_pfn, end_pfn); ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
} while (offlined_pages < 0); NULL, check_pages_isolated_cb);
} while (ret);
pr_info("Offlined Pages %ld\n", offlined_pages);
/* Ok, all of our target is isolated. /* Ok, all of our target is isolated.
We cannot do rollback at this point. */ We cannot do rollback at this point. */
offline_isolated_pages(start_pfn, end_pfn); walk_system_ram_range(start_pfn, end_pfn - start_pfn,
&offlined_pages, offline_isolated_pages_cb);
pr_info("Offlined Pages %ld\n", offlined_pages);
/* /*
* Onlining will reset pagetype flags and makes migrate type * Onlining will reset pagetype flags and makes migrate type
* MOVABLE, so just need to decrease the number of isolated * MOVABLE, so just need to decrease the number of isolated
......
...@@ -8453,7 +8453,7 @@ void zone_pcp_reset(struct zone *zone) ...@@ -8453,7 +8453,7 @@ void zone_pcp_reset(struct zone *zone)
* All pages in the range must be in a single zone and isolated * All pages in the range must be in a single zone and isolated
* before calling this. * before calling this.
*/ */
void unsigned long
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{ {
struct page *page; struct page *page;
...@@ -8461,12 +8461,15 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) ...@@ -8461,12 +8461,15 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
unsigned int order, i; unsigned int order, i;
unsigned long pfn; unsigned long pfn;
unsigned long flags; unsigned long flags;
unsigned long offlined_pages = 0;
/* find the first valid pfn */ /* find the first valid pfn */
for (pfn = start_pfn; pfn < end_pfn; pfn++) for (pfn = start_pfn; pfn < end_pfn; pfn++)
if (pfn_valid(pfn)) if (pfn_valid(pfn))
break; break;
if (pfn == end_pfn) if (pfn == end_pfn)
return; return offlined_pages;
offline_mem_sections(pfn, end_pfn); offline_mem_sections(pfn, end_pfn);
zone = page_zone(pfn_to_page(pfn)); zone = page_zone(pfn_to_page(pfn));
spin_lock_irqsave(&zone->lock, flags); spin_lock_irqsave(&zone->lock, flags);
...@@ -8484,12 +8487,14 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) ...@@ -8484,12 +8487,14 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
pfn++; pfn++;
SetPageReserved(page); SetPageReserved(page);
offlined_pages++;
continue; continue;
} }
BUG_ON(page_count(page)); BUG_ON(page_count(page));
BUG_ON(!PageBuddy(page)); BUG_ON(!PageBuddy(page));
order = page_order(page); order = page_order(page);
offlined_pages += 1 << order;
#ifdef CONFIG_DEBUG_VM #ifdef CONFIG_DEBUG_VM
pr_info("remove from free list %lx %d %lx\n", pr_info("remove from free list %lx %d %lx\n",
pfn, 1 << order, end_pfn); pfn, 1 << order, end_pfn);
...@@ -8502,6 +8507,8 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) ...@@ -8502,6 +8507,8 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
pfn += (1 << order); pfn += (1 << order);
} }
spin_unlock_irqrestore(&zone->lock, flags); spin_unlock_irqrestore(&zone->lock, flags);
return offlined_pages;
} }
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment