Commit 4fbb67e3 authored by Matt Rushton, committed by Stefano Stabellini

xen/setup: Remap Xen Identity Mapped RAM

Instead of ballooning dom0 memory up and down, this remaps the existing mfns
that were replaced by the identity map. The reason for this is that the
existing implementation ballooned memory up and down, which caused dom0
to have discontiguous pages. In some cases this resulted in the use of bounce
buffers, which reduced network I/O performance significantly. This change
honors the existing order of the pages, with the exception of some boundary
conditions.

To do this we need to update both the Linux p2m table and the Xen m2p table.
Particular care must be taken when updating the p2m table since it's important
to limit table memory consumption and reuse the existing leaf pages which get
freed when an entire leaf page is set to the identity map. To implement this,
mapping updates are grouped into blocks with table entries getting cached
temporarily and then released.

On my test system before:
Total pages: 2105014
Total contiguous: 1640635

After:
Total pages: 2105014
Total contiguous: 2098904

Signed-off-by: Matthew Rushton <mrushton@amazon.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent 0f33be00
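The "blocks" mentioned in the commit message are P2M_PER_PAGE-sized, leaf-aligned chunks. As a rough standalone sketch of that alignment arithmetic (the PAGE_SIZE value and the example pfn range below are illustrative assumptions, not values from the patch; the real logic lives in xen_do_set_identity_and_remap_chunk() further down):

/* Standalone sketch: split a pfn range into P2M_PER_PAGE-aligned blocks
 * plus unaligned head/tail remainders. Values are illustrative only. */
#include <stdio.h>

#define PAGE_SIZE    4096UL                              /* assumed x86 page size */
#define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) /* 512 on 64-bit */

int main(void)
{
        unsigned long start_pfn = 0x9f0;   /* example: start of an I/O hole */
        unsigned long size = 0x1500;       /* example: number of pfns */
        unsigned long mod, start_align, end_align;

        /* Round the start up and the end down to a p2m leaf boundary,
         * mirroring the mod/align computation in the patch. */
        mod = start_pfn % P2M_PER_PAGE;
        start_align = mod ? (start_pfn - mod + P2M_PER_PAGE) : start_pfn;
        mod = (start_pfn + size) % P2M_PER_PAGE;
        end_align = start_pfn + size - mod;

        if (start_align >= end_align) {
                /* Range never spans a full leaf page: no aligned block,
                 * the whole range is handled as a boundary case. */
                printf("boundary only: [%#lx, %#lx)\n", start_pfn, start_pfn + size);
                return 0;
        }

        printf("aligned blocks: [%#lx, %#lx)\n", start_align, end_align);
        printf("head remainder: [%#lx, %#lx)\n", start_pfn, start_align);
        printf("tail remainder: [%#lx, %#lx)\n", end_align, start_pfn + size);
        return 0;
}

The patch applies the same split twice, once for the identity range and once for the remap target, and identity-maps any unaligned head and tail separately.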
arch/x86/xen/p2m.c
@@ -173,6 +173,7 @@
 #include <xen/balloon.h>
 #include <xen/grant_table.h>

+#include "p2m.h"
 #include "multicalls.h"
 #include "xen-ops.h"

@@ -180,12 +181,6 @@ static void __init m2p_override_init(void);

 unsigned long xen_max_p2m_pfn __read_mostly;

-#define P2M_PER_PAGE            (PAGE_SIZE / sizeof(unsigned long))
-#define P2M_MID_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long *))
-#define P2M_TOP_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long **))
-
-#define MAX_P2M_PFN             (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
-
 /* Placeholders for holes in the address space */
 static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
 static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
@@ -202,16 +197,12 @@ static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_identity_mfn, P2M_MID_PER_PAGE);
 RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
 RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));

-/* We might hit two boundary violations at the start and end, at max each
- * boundary violation will require three middle nodes. */
-RESERVE_BRK(p2m_mid_extra, PAGE_SIZE * 2 * 3);
-
-/* When we populate back during bootup, the amount of pages can vary. The
- * max we have is seen is 395979, but that does not mean it can't be more.
- * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle
- * it can re-use Xen provided mfn_list array, so we only need to allocate at
- * most three P2M top nodes. */
-RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
+/* For each I/O range remapped we may lose up to two leaf pages for the boundary
+ * violations and three mid pages to cover up to 3GB. With
+ * early_can_reuse_p2m_middle() most of the leaf pages will be reused by the
+ * remapped region.
+ */
+RESERVE_BRK(p2m_identity_remap, PAGE_SIZE * 2 * 3 * MAX_REMAP_RANGES);

 static inline unsigned p2m_top_index(unsigned long pfn)
 {
arch/x86/xen/p2m.h (new file)
+#ifndef _XEN_P2M_H
+#define _XEN_P2M_H
+
+#define P2M_PER_PAGE            (PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long **))
+
+#define MAX_P2M_PFN             (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
+
+#define MAX_REMAP_RANGES        10
+
+extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
+                                      unsigned long pfn_e);
+
+#endif /* _XEN_P2M_H */
arch/x86/xen/setup.c
@@ -29,6 +29,7 @@
 #include <xen/features.h>
 #include "xen-ops.h"
 #include "vdso.h"
+#include "p2m.h"

 /* These are code, but not functions. Defined in entry.S */
 extern const char xen_hypervisor_callback[];
@@ -46,6 +47,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 /* Number of pages released from the initial allocation. */
 unsigned long xen_released_pages;

+/* Buffer used to remap identity mapped pages */
+unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata;
+
 /*
  * The maximum amount of extra memory compared to the base size. The
  * main scaling factor is the size of struct page. At extreme ratios
@@ -151,107 +155,325 @@ static unsigned long __init xen_do_chunk(unsigned long start,
        return len;
 }

-static unsigned long __init xen_release_chunk(unsigned long start,
-                                              unsigned long end)
-{
-        return xen_do_chunk(start, end, true);
-}
-
-static unsigned long __init xen_populate_chunk(
-        const struct e820entry *list, size_t map_size,
-        unsigned long max_pfn, unsigned long *last_pfn,
-        unsigned long credits_left)
-{
-        const struct e820entry *entry;
-        unsigned int i;
-        unsigned long done = 0;
-        unsigned long dest_pfn;
-
-        for (i = 0, entry = list; i < map_size; i++, entry++) {
-                unsigned long s_pfn;
-                unsigned long e_pfn;
-                unsigned long pfns;
-                long capacity;
-
-                if (credits_left <= 0)
-                        break;
-
-                if (entry->type != E820_RAM)
-                        continue;
-
-                e_pfn = PFN_DOWN(entry->addr + entry->size);
-
-                /* We only care about E820 after the xen_start_info->nr_pages */
-                if (e_pfn <= max_pfn)
-                        continue;
-
-                s_pfn = PFN_UP(entry->addr);
-                /* If the E820 falls within the nr_pages, we want to start
-                 * at the nr_pages PFN.
-                 * If that would mean going past the E820 entry, skip it
-                 */
-                if (s_pfn <= max_pfn) {
-                        capacity = e_pfn - max_pfn;
-                        dest_pfn = max_pfn;
-                } else {
-                        capacity = e_pfn - s_pfn;
-                        dest_pfn = s_pfn;
-                }
-
-                if (credits_left < capacity)
-                        capacity = credits_left;
-
-                pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
-                done += pfns;
-                *last_pfn = (dest_pfn + pfns);
-                if (pfns < capacity)
-                        break;
-                credits_left -= pfns;
-        }
-        return done;
-}
+/*
+ * Finds the next RAM pfn available in the E820 map after min_pfn.
+ * This function updates min_pfn with the pfn found and returns
+ * the size of that range or zero if not found.
+ */
+static unsigned long __init xen_find_pfn_range(
+        const struct e820entry *list, size_t map_size,
+        unsigned long *min_pfn)
+{
+        const struct e820entry *entry;
+        unsigned int i;
+        unsigned long done = 0;
+
+        for (i = 0, entry = list; i < map_size; i++, entry++) {
+                unsigned long s_pfn;
+                unsigned long e_pfn;
+
+                if (entry->type != E820_RAM)
+                        continue;
+
+                e_pfn = PFN_DOWN(entry->addr + entry->size);
+
+                /* We only care about E820 after this */
+                if (e_pfn < *min_pfn)
+                        continue;
+
+                s_pfn = PFN_UP(entry->addr);
+
+                /* If min_pfn falls within the E820 entry, we want to start
+                 * at the min_pfn PFN.
+                 */
+                if (s_pfn <= *min_pfn) {
+                        done = e_pfn - *min_pfn;
+                } else {
+                        done = e_pfn - s_pfn;
+                        *min_pfn = s_pfn;
+                }
+                break;
+        }
+
+        return done;
+}

-static void __init xen_set_identity_and_release_chunk(
-        unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
-        unsigned long *released, unsigned long *identity)
-{
-        unsigned long pfn;
-
-        /*
-         * If the PFNs are currently mapped, clear the mappings
-         * (except for the ISA region which must be 1:1 mapped) to
-         * release the refcounts (in Xen) on the original frames.
-         */
-        for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
-                pte_t pte = __pte_ma(0);
-
-                if (pfn < PFN_UP(ISA_END_ADDRESS))
-                        pte = mfn_pte(pfn, PAGE_KERNEL_IO);
-
-                (void)HYPERVISOR_update_va_mapping(
-                        (unsigned long)__va(pfn << PAGE_SHIFT), pte, 0);
-        }
-
-        if (start_pfn < nr_pages)
-                *released += xen_release_chunk(
-                        start_pfn, min(end_pfn, nr_pages));
-
-        *identity += set_phys_range_identity(start_pfn, end_pfn);
-}
+/*
+ * This releases a chunk of memory and then does the identity map. It's used as
+ * as a fallback if the remapping fails.
+ */
+static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
+        unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
+        unsigned long *released)
+{
+        WARN_ON(start_pfn > end_pfn);
+
+        /* Need to release pages first */
+        *released += xen_do_chunk(start_pfn, min(end_pfn, nr_pages), true);
+        *identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
+/*
+ * Helper function to update both the p2m and m2p tables.
+ */
+static unsigned long __init xen_update_mem_tables(unsigned long pfn,
+                                                  unsigned long mfn)
+{
+        struct mmu_update update = {
+                .ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
+                .val = pfn
+        };
+
+        /* Update p2m */
+        if (!early_set_phys_to_machine(pfn, mfn)) {
+                WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
+                     pfn, mfn);
+                return false;
+        }
+
+        /* Update m2p */
+        if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
+                WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
+                     mfn, pfn);
+                return false;
+        }
+
+        return true;
+}
+
+/*
+ * This function updates the p2m and m2p tables with an identity map from
+ * start_pfn to start_pfn+size and remaps the underlying RAM of the original
+ * allocation at remap_pfn. It must do so carefully in P2M_PER_PAGE sized blocks
+ * to not exhaust the reserved brk space. Doing it in properly aligned blocks
+ * ensures we only allocate the minimum required leaf pages in the p2m table. It
+ * copies the existing mfns from the p2m table under the 1:1 map, overwrites
+ * them with the identity map and then updates the p2m and m2p tables with the
+ * remapped memory.
+ */
+static unsigned long __init xen_do_set_identity_and_remap_chunk(
+        unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
+{
+        unsigned long ident_pfn_iter, remap_pfn_iter;
+        unsigned long ident_start_pfn_align, remap_start_pfn_align;
+        unsigned long ident_end_pfn_align, remap_end_pfn_align;
+        unsigned long ident_boundary_pfn, remap_boundary_pfn;
+        unsigned long ident_cnt = 0;
+        unsigned long remap_cnt = 0;
+        unsigned long left = size;
+        unsigned long mod;
+        int i;
+
+        WARN_ON(size == 0);
+
+        BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
+
+        /*
+         * Determine the proper alignment to remap memory in P2M_PER_PAGE sized
+         * blocks. We need to keep track of both the existing pfn mapping and
+         * the new pfn remapping.
+         */
+        mod = start_pfn % P2M_PER_PAGE;
+        ident_start_pfn_align =
+                mod ? (start_pfn - mod + P2M_PER_PAGE) : start_pfn;
+        mod = remap_pfn % P2M_PER_PAGE;
+        remap_start_pfn_align =
+                mod ? (remap_pfn - mod + P2M_PER_PAGE) : remap_pfn;
+        mod = (start_pfn + size) % P2M_PER_PAGE;
+        ident_end_pfn_align = start_pfn + size - mod;
+        mod = (remap_pfn + size) % P2M_PER_PAGE;
+        remap_end_pfn_align = remap_pfn + size - mod;
+
+        /* Iterate over each p2m leaf node in each range */
+        for (ident_pfn_iter = ident_start_pfn_align, remap_pfn_iter = remap_start_pfn_align;
+             ident_pfn_iter < ident_end_pfn_align && remap_pfn_iter < remap_end_pfn_align;
+             ident_pfn_iter += P2M_PER_PAGE, remap_pfn_iter += P2M_PER_PAGE) {
+                /* Check we aren't past the end */
+                BUG_ON(ident_pfn_iter + P2M_PER_PAGE > start_pfn + size);
+                BUG_ON(remap_pfn_iter + P2M_PER_PAGE > remap_pfn + size);
+
+                /* Save p2m mappings */
+                for (i = 0; i < P2M_PER_PAGE; i++)
+                        xen_remap_buf[i] = pfn_to_mfn(ident_pfn_iter + i);
+
+                /* Set identity map which will free a p2m leaf */
+                ident_cnt += set_phys_range_identity(ident_pfn_iter,
+                        ident_pfn_iter + P2M_PER_PAGE);
+
+#ifdef DEBUG
+                /* Helps verify a p2m leaf has been freed */
+                for (i = 0; i < P2M_PER_PAGE; i++) {
+                        unsigned int pfn = ident_pfn_iter + i;
+                        BUG_ON(pfn_to_mfn(pfn) != pfn);
+                }
+#endif
+                /* Now remap memory */
+                for (i = 0; i < P2M_PER_PAGE; i++) {
+                        unsigned long mfn = xen_remap_buf[i];
+
+                        /* This will use the p2m leaf freed above */
+                        if (!xen_update_mem_tables(remap_pfn_iter + i, mfn)) {
+                                WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
+                                        remap_pfn_iter + i, mfn);
+                                return 0;
+                        }
+
+                        remap_cnt++;
+                }
+
+                left -= P2M_PER_PAGE;
+        }
+
+        /* Max boundary space possible */
+        BUG_ON(left > (P2M_PER_PAGE - 1) * 2);
+
+        /* Now handle the boundary conditions */
+        ident_boundary_pfn = start_pfn;
+        remap_boundary_pfn = remap_pfn;
+        for (i = 0; i < left; i++) {
+                unsigned long mfn;
+
+                /* These two checks move from the start to end boundaries */
+                if (ident_boundary_pfn == ident_start_pfn_align)
+                        ident_boundary_pfn = ident_pfn_iter;
+                if (remap_boundary_pfn == remap_start_pfn_align)
+                        remap_boundary_pfn = remap_pfn_iter;
+
+                /* Check we aren't past the end */
+                BUG_ON(ident_boundary_pfn >= start_pfn + size);
+                BUG_ON(remap_boundary_pfn >= remap_pfn + size);
+
+                mfn = pfn_to_mfn(ident_boundary_pfn);
+
+                if (!xen_update_mem_tables(remap_boundary_pfn, mfn)) {
+                        WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
+                                remap_pfn_iter + i, mfn);
+                        return 0;
+                }
+                remap_cnt++;
+
+                ident_boundary_pfn++;
+                remap_boundary_pfn++;
+        }
+
+        /* Finish up the identity map */
+        if (ident_start_pfn_align >= ident_end_pfn_align) {
+                /*
+                 * In this case we have an identity range which does not span an
+                 * aligned block so everything needs to be identity mapped here.
+                 * If we didn't check this we might remap too many pages since
+                 * the align boundaries are not meaningful in this case.
+                 */
+                ident_cnt += set_phys_range_identity(start_pfn,
+                        start_pfn + size);
+        } else {
+                /* Remapped above so check each end of the chunk */
+                if (start_pfn < ident_start_pfn_align)
+                        ident_cnt += set_phys_range_identity(start_pfn,
+                                ident_start_pfn_align);
+                if (start_pfn + size > ident_pfn_iter)
+                        ident_cnt += set_phys_range_identity(ident_pfn_iter,
+                                start_pfn + size);
+        }
+
+        BUG_ON(ident_cnt != size);
+        BUG_ON(remap_cnt != size);
+
+        return size;
+}

-static unsigned long __init xen_set_identity_and_release(
-        const struct e820entry *list, size_t map_size, unsigned long nr_pages)
+/*
+ * This function takes a contiguous pfn range that needs to be identity mapped
+ * and:
+ *
+ * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
+ * 2) Calls the do_ function to actually do the mapping/remapping work.
+ *
+ * The goal is to not allocate additional memory but to remap the existing
+ * pages. In the case of an error the underlying memory is simply released back
+ * to Xen and not remapped.
+ */
+static unsigned long __init xen_set_identity_and_remap_chunk(
+        const struct e820entry *list, size_t map_size, unsigned long start_pfn,
+        unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
+        unsigned long *identity, unsigned long *remapped,
+        unsigned long *released)
+{
+        unsigned long pfn;
+        unsigned long i = 0;
+        unsigned long n = end_pfn - start_pfn;
+
+        while (i < n) {
+                unsigned long cur_pfn = start_pfn + i;
+                unsigned long left = n - i;
+                unsigned long size = left;
+                unsigned long remap_range_size;
+
+                /* Do not remap pages beyond the current allocation */
+                if (cur_pfn >= nr_pages) {
+                        /* Identity map remaining pages */
+                        *identity += set_phys_range_identity(cur_pfn,
+                                cur_pfn + size);
+                        break;
+                }
+                if (cur_pfn + size > nr_pages)
+                        size = nr_pages - cur_pfn;
+
+                remap_range_size = xen_find_pfn_range(list, map_size,
+                                                      &remap_pfn);
+                if (!remap_range_size) {
+                        pr_warning("Unable to find available pfn range, not remapping identity pages\n");
+                        xen_set_identity_and_release_chunk(cur_pfn,
+                                cur_pfn + left, nr_pages, identity, released);
+                        break;
+                }
+                /* Adjust size to fit in current e820 RAM region */
+                if (size > remap_range_size)
+                        size = remap_range_size;
+
+                if (!xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn)) {
+                        WARN(1, "Failed to remap 1:1 memory cur_pfn=%ld size=%ld remap_pfn=%ld\n",
+                                cur_pfn, size, remap_pfn);
+                        xen_set_identity_and_release_chunk(cur_pfn,
+                                cur_pfn + left, nr_pages, identity, released);
+                        break;
+                }
+
+                /* Update variables to reflect new mappings. */
+                i += size;
+                remap_pfn += size;
+                *identity += size;
+                *remapped += size;
+        }
+
+        /*
+         * If the PFNs are currently mapped, the VA mapping also needs
+         * to be updated to be 1:1.
+         */
+        for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+                (void)HYPERVISOR_update_va_mapping(
+                        (unsigned long)__va(pfn << PAGE_SHIFT),
+                        mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+        return remap_pfn;
+}
+
+static unsigned long __init xen_set_identity_and_remap(
+        const struct e820entry *list, size_t map_size, unsigned long nr_pages,
+        unsigned long *released)
 {
         phys_addr_t start = 0;
-        unsigned long released = 0;
         unsigned long identity = 0;
+        unsigned long remapped = 0;
+        unsigned long last_pfn = nr_pages;
         const struct e820entry *entry;
+        unsigned long num_released = 0;
         int i;

         /*
          * Combine non-RAM regions and gaps until a RAM region (or the
          * end of the map) is reached, then set the 1:1 map and
-         * release the pages (if available) in those non-RAM regions.
+         * remap the memory in those non-RAM regions.
          *
          * The combined non-RAM regions are rounded to a whole number
          * of pages so any partial pages are accessible via the 1:1
@@ -269,22 +491,24 @@ static unsigned long __init xen_set_identity_and_release(
                         end_pfn = PFN_UP(entry->addr);

                         if (start_pfn < end_pfn)
-                                xen_set_identity_and_release_chunk(
-                                        start_pfn, end_pfn, nr_pages,
-                                        &released, &identity);
+                                last_pfn = xen_set_identity_and_remap_chunk(
+                                                list, map_size, start_pfn,
+                                                end_pfn, nr_pages, last_pfn,
+                                                &identity, &remapped,
+                                                &num_released);
                         start = end;
                 }
         }

-        if (released)
-                printk(KERN_INFO "Released %lu pages of unused memory\n", released);
-        if (identity)
-                printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+        *released = num_released;

-        return released;
+        pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
+        pr_info("Remapped %ld page(s), last_pfn=%ld\n", remapped,
+                last_pfn);
+        pr_info("Released %ld page(s)\n", num_released);
+
+        return last_pfn;
 }

 static unsigned long __init xen_get_max_pages(void)
 {
         unsigned long max_pages = MAX_DOMAIN_PAGES;
@@ -347,7 +571,6 @@ char * __init xen_memory_setup(void)
         unsigned long max_pages;
         unsigned long last_pfn = 0;
         unsigned long extra_pages = 0;
-        unsigned long populated;
         int i;
         int op;
@@ -392,20 +615,11 @@
         extra_pages += max_pages - max_pfn;

         /*
-         * Set P2M for all non-RAM pages and E820 gaps to be identity
-         * type PFNs. Any RAM pages that would be made inaccesible by
-         * this are first released.
+         * Set identity map on non-RAM pages and remap the underlying RAM.
          */
-        xen_released_pages = xen_set_identity_and_release(
-                                        map, memmap.nr_entries, max_pfn);
-
-        /*
-         * Populate back the non-RAM pages and E820 gaps that had been
-         * released. */
-        populated = xen_populate_chunk(map, memmap.nr_entries,
-                        max_pfn, &last_pfn, xen_released_pages);
-
-        xen_released_pages -= populated;
+        last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
+                                              &xen_released_pages);

         extra_pages += xen_released_pages;

         if (last_pfn > max_pfn) {