Commit 4e44e44b authored by Mukesh Rathor, committed by Konrad Rzeszutek Wilk

xen/pvh: MMU changes for PVH (v2)

.. which are surprisingly small compared to the amount needed for PV code.

PVH mostly uses native mmu ops: we leave the generic (native_*)
implementations in place for the majority of operations and override
the baremetal versions only with the ones we need.
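The mechanism is a simple feature check at the top of each Xen-specific
hook: on an auto-translated (PVH) guest the hook bails out early and the
native behaviour stands. A minimal sketch of the pattern, mirroring the
set_page_prot_flags() change below:

	/* PVH: page tables are native, no Xen-side fixup needed. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;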

At startup, we are running with pre-allocated page tables, courtesy
of the toolstack. We still need to graft them into the Linux initial
page tables, but there is no need to pin/unpin them or to change them
to R/O or R/W.
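In xen_setup_kernel_pagetable() the grafting therefore reduces to
loading the toolstack-built tables directly. A simplified sketch of
the resulting control flow (the PV-only body is elided):

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/* PV: convert pfn entries to mfn, set the tables R/O,
		 * and switch to them via a Xen multicall. */
		...
		__xen_write_cr3(true, __pa(init_level4_pgt));
		xen_mc_issue(PARAVIRT_LAZY_CPU);
	} else
		native_write_cr3(__pa(init_level4_pgt));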

Note that xen_pagetable_init, thanks to commit
7836fec9d0994cc9c9150c5a33f0eb0eb08a335a ("xen/mmu/p2m: Refactor the
xen_pagetable_init code."), does not need any changes - we just need
to make sure that xen_post_allocator_init does not alter the pvops
from their default native values.
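Concretely, xen_post_allocator_init() now returns early on PVH,
leaving pv_mmu_ops at their native defaults (a sketch matching the
last hunk below):

	static void __init xen_post_allocator_init(void)
	{
		if (xen_feature(XENFEAT_auto_translated_physmap))
			return;	/* keep the default native pvops */

		pv_mmu_ops.set_pte = xen_set_pte;
		...
	}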
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
parent b621e157
@@ -1757,6 +1757,10 @@ static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
 	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
 	pte_t pte = pfn_pte(pfn, prot);
 
+	/* For PVH no need to set R/O or R/W to pin them or unpin them. */
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return;
+
 	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
 		BUG();
 }
@@ -1867,6 +1871,7 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
  * but that's enough to get __va working.  We need to fill in the rest
  * of the physical mapping once some sort of allocator has been set
  * up.
+ * NOTE: for PVH, the page tables are native.
  */
 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
@@ -1888,6 +1893,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	/* Zap identity mapping */
 	init_level4_pgt[0] = __pgd(0);
 
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 	/* Pre-constructed entries are in pfn, so convert to mfn */
 	/* L4[272] -> level3_ident_pgt
 	 * L4[511] -> level3_kernel_pgt */
@@ -1898,7 +1904,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	/* L3_k[510] -> level2_kernel_pgt
 	 * L3_i[511] -> level2_fixmap_pgt */
 	convert_pfn_mfn(level3_kernel_pgt);
-
+	}
 	/* We get [511][511] and have Xen's version of level2_kernel_pgt */
 	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
 	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
@@ -1922,7 +1928,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	copy_page(level2_fixmap_pgt, l2);
 	/* Note that we don't do anything with level1_fixmap_pgt which
 	 * we don't need. */
-
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 	/* Make pagetable pieces RO */
 	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
@@ -1947,6 +1953,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	xen_mc_batch();
 	__xen_write_cr3(true, __pa(init_level4_pgt));
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
+	} else
+		native_write_cr3(__pa(init_level4_pgt));
 
 	/* We can't that easily rip out L3 and L2, as the Xen pagetables are
 	 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ...  for
@@ -2107,6 +2115,9 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 
 static void __init xen_post_allocator_init(void)
 {
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return;
+
 	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
...