Commit 59b52f10 authored by David Hildenbrand, committed by Boris Ostrovsky

xen/balloon: Mark pages PG_offline in balloon_append()

Let's move the __SetPageOffline() call which all callers perform into
balloon_append().

In decrease_reservation(), pages are now marked PG_offline a little
later than before; however, this should not matter for Xen.
Suggested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
parent dde3285f
@@ -158,6 +158,8 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 /* balloon_append: add the given page to the balloon. */
 static void balloon_append(struct page *page)
 {
+	__SetPageOffline(page);
+
 	/* Lowmem is re-populated first, so highmem pages go at list tail. */
 	if (PageHighMem(page)) {
 		list_add_tail(&page->lru, &ballooned_pages);
@@ -372,7 +374,6 @@ static void xen_online_page(struct page *page, unsigned int order)
 	for (i = 0; i < size; i++) {
 		p = pfn_to_page(start_pfn + i);
 		__online_page_set_limits(p);
-		__SetPageOffline(p);
 		balloon_append(p);
 	}
 	mutex_unlock(&balloon_mutex);
@@ -466,7 +467,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			state = BP_EAGAIN;
 			break;
 		}
-		__SetPageOffline(page);
 		adjust_managed_page_count(page, -1);
 		xenmem_reservation_scrub_page(page);
 		list_add(&page->lru, &pages);
@@ -648,11 +648,9 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 	mutex_lock(&balloon_mutex);
 
 	for (i = 0; i < nr_pages; i++) {
-		if (pages[i]) {
-			__SetPageOffline(pages[i]);
+		if (pages[i])
 			balloon_append(pages[i]);
-		}
 	}
 
 	balloon_stats.target_unpopulated -= nr_pages;
@@ -669,7 +667,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
 				      unsigned long pages)
 {
 	unsigned long pfn, extra_pfn_end;
-	struct page *page;
 
 	/*
 	 * If the amount of usable memory has been limited (e.g., with
@@ -679,12 +676,10 @@ static void __init balloon_add_region(unsigned long start_pfn,
 	extra_pfn_end = min(max_pfn, start_pfn + pages);
 
 	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
-		page = pfn_to_page(pfn);
 		/* totalram_pages and totalhigh_pages do not
 		   include the boot-time balloon extension, so
 		   don't subtract from it. */
-		__SetPageOffline(page);
-		balloon_append(page);
+		balloon_append(pfn_to_page(pfn));
 	}
 
 	balloon_stats.total_pages += extra_pfn_end - start_pfn;
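
In short, the page-offline marking that every caller previously open-coded now happens inside balloon_append() itself. A minimal before/after sketch of the caller-visible effect, taken only from the xen_online_page() hunk above (surrounding code elided):

	/* Before this commit: each caller marked the page itself. */
	p = pfn_to_page(start_pfn + i);
	__online_page_set_limits(p);
	__SetPageOffline(p);
	balloon_append(p);

	/* After this commit: balloon_append() calls __SetPageOffline() internally. */
	p = pfn_to_page(start_pfn + i);
	__online_page_set_limits(p);
	balloon_append(p);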