Commit 30756c62 authored by Julien Grall, committed by David Vrabel

xen/balloon: Don't rely on the page granularity being the same for Xen and Linux

For ARM64 guests, Linux can use either 64K or 4K page granularity. The
hypercall interface, however, is always based on 4K page granularity.

With 64K page granularity, a single Linux page spans multiple Xen
frames.
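
For illustration, the number of Xen frames backing one Linux page falls
out of the page-size constants as follows. This is a minimal standalone
sketch of the arithmetic (a hypothetical userspace program, not driver
code; the constant names mirror the kernel's XEN_PAGE_SHIFT,
XEN_PAGE_SIZE and XEN_PFN_PER_PAGE definitions):

    /* Hypothetical standalone illustration, not part of the driver. */
    #include <stdio.h>

    #define XEN_PAGE_SHIFT   12                      /* Xen frames are always 4K */
    #define XEN_PAGE_SIZE    (1UL << XEN_PAGE_SHIFT)
    #define PAGE_SHIFT       16                      /* assume a 64K-page ARM64 kernel */
    #define PAGE_SIZE        (1UL << PAGE_SHIFT)
    #define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)

    int main(void)
    {
            /* One 64K Linux page covers 16 consecutive 4K Xen frames. */
            printf("Xen frames per Linux page: %lu\n", XEN_PFN_PER_PAGE);
            return 0;
    }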

To avoid splitting the page into 4K frames, take advantage of the
extent_order field to directly allocate/free chunks of the Linux page
size.
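
Concretely, one extent of order EXTENT_ORDER always covers exactly one
Linux page. A short sketch of how the values work out, using the
EXTENT_ORDER definition added by this patch (fls() returns the 1-based
position of the most significant set bit):

    #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

    /*
     * 64K Linux pages: XEN_PFN_PER_PAGE = 16, fls(16) - 1 = 4,
     *                  so one extent = 2^4 * 4K = 64K = PAGE_SIZE.
     * 4K Linux pages:  XEN_PFN_PER_PAGE = 1,  fls(1) - 1 = 0,
     *                  so one extent = 2^0 * 4K = 4K = PAGE_SIZE.
     */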

Note that PVMMU is only used for PV guests (which are x86-only), where
the page granularity is always 4KB. Some BUILD_BUG_ONs have been added
to ensure this, because that code has not been modified.
Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent 9652c080
@@ -112,6 +112,12 @@ static struct ctl_table xen_root[] = {
 #endif
 
+/*
+ * Use one extent per PAGE_SIZE to avoid breaking the page down into
+ * multiple frames.
+ */
+#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
+
 /*
  * balloon_process() state:
  *
@@ -304,6 +310,12 @@ static enum bp_state reserve_additional_memory(void)
 	nid = memory_add_physaddr_to_nid(resource->start);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
+	/*
+	 * We don't support PV MMU when Linux and Xen are using
+	 * different page granularities.
+	 */
+	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
 	/*
 	 * add_memory() will build page tables for the new memory so
 	 * the p2m must contain invalid entries so the correct
@@ -384,11 +396,11 @@ static bool balloon_is_inflated(void)
 static enum bp_state increase_reservation(unsigned long nr_pages)
 {
 	int rc;
-	unsigned long pfn, i;
+	unsigned long i;
 	struct page *page;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
-		.extent_order = 0,
+		.extent_order = EXTENT_ORDER,
 		.domid        = DOMID_SELF
 	};
@@ -401,7 +413,11 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 			nr_pages = i;
 			break;
 		}
-		frame_list[i] = page_to_pfn(page);
+
+		/* XENMEM_populate_physmap requires a PFN based on Xen
+		 * granularity.
+		 */
+		frame_list[i] = page_to_xen_pfn(page);
 		page = balloon_next_page(page);
 	}
@@ -415,10 +431,16 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 		page = balloon_retrieve(false);
 		BUG_ON(page == NULL);
 
-		pfn = page_to_pfn(page);
-
 #ifdef CONFIG_XEN_HAVE_PVMMU
+		/*
+		 * We don't support PV MMU when Linux and Xen are using
+		 * different page granularities.
+		 */
+		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			unsigned long pfn = page_to_pfn(page);
+
 			set_phys_to_machine(pfn, frame_list[i]);
 
 			/* Link back into the page tables if not highmem. */
@@ -445,14 +467,15 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 {
 	enum bp_state state = BP_DONE;
-	unsigned long pfn, i;
-	struct page *page;
+	unsigned long i;
+	struct page *page, *tmp;
 	int ret;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
-		.extent_order = 0,
+		.extent_order = EXTENT_ORDER,
 		.domid        = DOMID_SELF
 	};
+	LIST_HEAD(pages);
 
 	if (nr_pages > ARRAY_SIZE(frame_list))
 		nr_pages = ARRAY_SIZE(frame_list);
@@ -465,8 +488,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			break;
 		}
 		scrub_page(page);
-
-		frame_list[i] = page_to_pfn(page);
+		list_add(&page->lru, &pages);
 	}
 
 	/*
@@ -478,14 +500,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	 */
 	kmap_flush_unused();
 
-	/* Update direct mapping, invalidate P2M, and add to balloon. */
-	for (i = 0; i < nr_pages; i++) {
-		pfn = frame_list[i];
-		frame_list[i] = pfn_to_gfn(pfn);
-		page = pfn_to_page(pfn);
+	/*
+	 * Set up the frame, update the direct mapping, invalidate the
+	 * P2M, and add the page to the balloon.
+	 */
+	i = 0;
+	list_for_each_entry_safe(page, tmp, &pages, lru) {
+		/* XENMEM_decrease_reservation requires a GFN */
+		frame_list[i++] = xen_page_to_gfn(page);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
+		/*
+		 * We don't support PV MMU when Linux and Xen are using
+		 * different page granularities.
+		 */
+		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			unsigned long pfn = page_to_pfn(page);
+
 			if (!PageHighMem(page)) {
 				ret = HYPERVISOR_update_va_mapping(
 						(unsigned long)__va(pfn << PAGE_SHIFT),
@@ -495,6 +528,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 		}
 #endif
+		list_del(&page->lru);
 
 		balloon_append(page);
 	}
@@ -603,6 +637,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
 		if (page) {
 			pages[pgno++] = page;
 #ifdef CONFIG_XEN_HAVE_PVMMU
+			/*
+			 * We don't support PV MMU when Linux and Xen are using
+			 * different page granularities.
+			 */
+			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
 			ret = xen_alloc_p2m_entry(page_to_pfn(page));
 			if (ret < 0)
 				goto out_undo;