Commit acdd0b62 authored by Matias Zabaljauregui, committed by Rusty Russell

lguest: PAE support

This version requires that the host and guest have the same PAE status.
The NX capability is not offered to the guest yet.
Signed-off-by: Matias Zabaljauregui <zabaljauregui@gmail.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent cefcad17
@@ -37,7 +37,6 @@ Running Lguest:
"Paravirtualized guest support" = Y
"Lguest guest support" = Y
"High Memory Support" = off/4GB
-"PAE (Physical Address Extension) Support" = N
"Alignment value to which kernel should be aligned" = 0x100000
(CONFIG_PARAVIRT=y, CONFIG_LGUEST_GUEST=y, CONFIG_HIGHMEM64G=n and
CONFIG_PHYSICAL_ALIGN=0x100000)
......
@@ -17,8 +17,13 @@
/* Pages for switcher itself, then two pages per cpu */
#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
-/* We map at -4M for ease of mapping into the guest (one PTE page). */
/* We map at -4M (-2M when PAE is activated) for ease of mapping
 * into the guest (one PTE page). */
#ifdef CONFIG_X86_PAE
#define SWITCHER_ADDR 0xFFE00000
#else
#define SWITCHER_ADDR 0xFFC00000
#endif
/* Found in switcher.S */
extern unsigned long default_idt_entries[];
......
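Editorial aside (not part of the commit): the "-4M / -2M" offsets in the comment above are easy to verify by hand. A minimal sketch printing both Switcher addresses from the top of the 32-bit address space:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long top = 1ULL << 32;    /* top of the 32-bit address space */
            printf("%#llx\n", top - (2ULL << 20));  /* 0xffe00000: PAE SWITCHER_ADDR */
            printf("%#llx\n", top - (4ULL << 20));  /* 0xffc00000: non-PAE SWITCHER_ADDR */
            return 0;
    }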
@@ -12,6 +12,7 @@
#define LHCALL_TS 8
#define LHCALL_SET_CLOCKEVENT 9
#define LHCALL_HALT 10
#define LHCALL_SET_PMD 13
#define LHCALL_SET_PTE 14
#define LHCALL_SET_PGD 15
#define LHCALL_LOAD_TLS 16
@@ -33,7 +34,7 @@
 * operations?  There are two ways: the direct way is to make a "hypercall",
 * to make requests of the Host Itself.
 *
- * We use the KVM hypercall mechanism. Eighteen hypercalls are
 * We use the KVM hypercall mechanism. Seventeen hypercalls are
 * available: the hypercall number is put in the %eax register, and the
 * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
 * If a return value makes sense, it's returned in %eax.
......
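Editorial aside (not part of the commit): the calling convention described in the comment above maps directly onto the generic KVM hypercall helpers. A minimal, hedged sketch of issuing the four-argument PAE PTE update from guest code; the example_set_pte() wrapper is hypothetical, and lguest's boot.c actually routes these through its own hcall()/async_hcall() paths:

    #include <asm/kvm_para.h>      /* kvm_hypercall4() */
    #include <asm/lguest_hcall.h>  /* LHCALL_SET_PTE */

    /* %eax = LHCALL_SET_PTE, %ebx/%ecx/%edx/%esi carry the four arguments. */
    static void example_set_pte(unsigned long pgdir_pa, unsigned long addr,
                                unsigned long pte_low, unsigned long pte_high)
    {
            kvm_hypercall4(LHCALL_SET_PTE, pgdir_pa, addr, pte_low, pte_high);
    }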
@@ -2,7 +2,6 @@ config LGUEST_GUEST
bool "Lguest guest support"
select PARAVIRT
depends on X86_32
-depends on !X86_PAE
select VIRTIO
select VIRTIO_RING
select VIRTIO_CONSOLE
......
@@ -167,6 +167,7 @@ static void lazy_hcall3(unsigned long call,
async_hcall(call, arg1, arg2, arg3, 0);
}
#ifdef CONFIG_X86_PAE
static void lazy_hcall4(unsigned long call,
unsigned long arg1,
unsigned long arg2,
@@ -178,6 +179,7 @@ static void lazy_hcall4(unsigned long call,
else
async_hcall(call, arg1, arg2, arg3, arg4);
}
#endif
/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
 * issue the do-nothing hypercall to flush any stored calls. */
@@ -380,8 +382,8 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
case 1: /* Basic feature request. */
/* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
*cx &= 0x00002201;
-/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU. */
/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU, PAE. */
-*dx &= 0x07808111;
*dx &= 0x07808151;
/* The Host can do a nice optimization if it knows that the
 * kernel mappings (addresses above 0xC0000000 or whatever
 * PAGE_OFFSET is set to) haven't changed. But Linux calls
@@ -400,6 +402,11 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
if (*ax > 0x80000008)
*ax = 0x80000008;
break;
case 0x80000001:
/* Here we should fix nx cap depending on host. */
/* For this version of PAE, we just clear NX bit. */
*dx &= ~(1 << 20);
break;
}
}
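Editorial cross-check of the masks used above (not part of the commit): the only bit newly let through in the leaf-1 EDX mask is bit 6, the PAE feature flag, and the bit cleared for leaf 0x80000001 is bit 20, the NX flag:

    #include <stdio.h>

    int main(void)
    {
            printf("%#x\n", 0x07808151 ^ 0x07808111);  /* 0x40 -> EDX bit 6, PAE */
            printf("%#x\n", 1 << 20);                  /* 0x100000 -> EDX bit 20, NX */
            return 0;
    }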
@@ -533,7 +540,12 @@ static void lguest_write_cr4(unsigned long val)
static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
#ifdef CONFIG_X86_PAE
lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
ptep->pte_low, ptep->pte_high);
#else
lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low);
#endif
}
static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -543,15 +555,37 @@ static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
lguest_pte_update(mm, addr, ptep);
}
-/* The Guest calls this to set a top-level entry. Again, we set the entry then
- * tell the Host which top-level page we changed, and the index of the entry we
- * changed. */
/* The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd
 * to set a middle-level entry when PAE is activated.
 * Again, we set the entry then tell the Host which page we changed,
 * and the index of the entry we changed. */
#ifdef CONFIG_X86_PAE
static void lguest_set_pud(pud_t *pudp, pud_t pudval)
{
native_set_pud(pudp, pudval);
/* 32 bytes aligned pdpt address and the index. */
lazy_hcall2(LHCALL_SET_PGD, __pa(pudp) & 0xFFFFFFE0,
(__pa(pudp) & 0x1F) / sizeof(pud_t));
}
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
native_set_pmd(pmdp, pmdval);
lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
(__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#else
/* The Guest calls lguest_set_pmd to set a top-level entry when PAE is not
* activated. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
native_set_pmd(pmdp, pmdval);
lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK,
(__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
}
#endif
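Editorial aside (not part of the commit): the 0xFFFFFFE0/0x1F masking in lguest_set_pud() above relies on the PAE top level (the pdpt) being only four 8-byte entries, i.e. 32 bytes, and 32-byte aligned. A small sketch with a made-up physical address shows how the mask recovers the table base and the entry index:

    #include <stdio.h>

    int main(void)
    {
            unsigned long pa = 0x1234018UL;   /* hypothetical __pa(pudp) */
            printf("table %#lx, index %lu\n",
                   pa & 0xFFFFFFE0UL,         /* 32-byte aligned pdpt base: 0x1234000 */
                   (pa & 0x1FUL) / 8);        /* entry index within the pdpt: 3 */
            return 0;
    }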
/* There are a couple of legacy places where the kernel sets a PTE, but we
 * don't know the top level any more. This is useless for us, since we don't
@@ -569,6 +603,26 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval)
lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}
#ifdef CONFIG_X86_PAE
static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
{
native_set_pte_atomic(ptep, pte);
if (cr3_changed)
lazy_hcall1(LHCALL_FLUSH_TLB, 1);
}
void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
native_pte_clear(mm, addr, ptep);
lguest_pte_update(mm, addr, ptep);
}
void lguest_pmd_clear(pmd_t *pmdp)
{
lguest_set_pmd(pmdp, __pmd(0));
}
#endif
/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
 * native page table operations. On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
@@ -1035,6 +1089,7 @@ __init void lguest_init(void)
pv_info.name = "lguest";
pv_info.paravirt_enabled = 1;
pv_info.kernel_rpl = 1;
pv_info.shared_kernel_pmd = 1;
/* We set up all the lguest overrides for sensitive operations. These
 * are detailed with the operations themselves. */
@@ -1080,6 +1135,12 @@ __init void lguest_init(void)
pv_mmu_ops.set_pte = lguest_set_pte;
pv_mmu_ops.set_pte_at = lguest_set_pte_at;
pv_mmu_ops.set_pmd = lguest_set_pmd;
#ifdef CONFIG_X86_PAE
pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
pv_mmu_ops.pte_clear = lguest_pte_clear;
pv_mmu_ops.pmd_clear = lguest_pmd_clear;
pv_mmu_ops.set_pud = lguest_set_pud;
#endif
pv_mmu_ops.read_cr2 = lguest_read_cr2;
pv_mmu_ops.read_cr3 = lguest_read_cr3;
pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
......
config LGUEST
tristate "Linux hypervisor example code"
-depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX
depends on X86_32 && EXPERIMENTAL && FUTEX
select HVC_DRIVER
---help---
This is a very simple module which allows you to run
......
@@ -77,11 +77,21 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
break;
case LHCALL_SET_PTE:
#ifdef CONFIG_X86_PAE
guest_set_pte(cpu, args->arg1, args->arg2,
__pte(args->arg3 | (u64)args->arg4 << 32));
#else
guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
#endif
break;
case LHCALL_SET_PGD:
guest_set_pgd(cpu->lg, args->arg1, args->arg2);
break;
#ifdef CONFIG_X86_PAE
case LHCALL_SET_PMD:
guest_set_pmd(cpu->lg, args->arg1, args->arg2);
break;
#endif
case LHCALL_SET_CLOCKEVENT:
guest_set_clockevent(cpu, args->arg1);
break;
......
@@ -137,6 +137,8 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
 * in the kernel. */
#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK)
#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT)
#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK)
#define pmd_pfn(x) (pmd_val(x) >> PAGE_SHIFT)
/* interrupts_and_traps.c: */
unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
@@ -170,6 +172,9 @@ int init_guest_pagetable(struct lguest *lg);
void free_guest_pagetable(struct lguest *lg);
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i);
#ifdef CONFIG_X86_PAE
void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
#endif
void guest_pagetable_clear_all(struct lg_cpu *cpu);
void guest_pagetable_flush_user(struct lg_cpu *cpu);
void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
......
@@ -53,6 +53,17 @@
 * page. */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
/* For PAE we need the PMD index as well. We use the last 2MB, so we
* will need the last pmd entry of the last pmd page. */
#ifdef CONFIG_X86_PAE
#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)
#define RESERVE_MEM 2U
#define CHECK_GPGD_MASK _PAGE_PRESENT
#else
#define RESERVE_MEM 4U
#define CHECK_GPGD_MASK _PAGE_TABLE
#endif
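Editorial aside (not part of the commit): the "last pmd entry of the last pmd page" claim in the comment above can be checked by hand, assuming the usual PAE 2/9/9/12 split of a 32-bit virtual address:

    #include <stdio.h>

    int main(void)
    {
            unsigned long addr = 0xFFE00000UL;   /* PAE SWITCHER_ADDR */
            printf("pgd index %lu, pmd index %lu\n",
                   addr >> 30,                   /* 3   == PTRS_PER_PGD - 1 */
                   (addr >> 21) & 0x1FF);        /* 511 == PTRS_PER_PMD - 1 */
            return 0;
    }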
/* We actually need a separate PTE page for each CPU. Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU. */
@@ -73,23 +84,58 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
unsigned int index = pgd_index(vaddr);
#ifndef CONFIG_X86_PAE
/* We kill any Guest trying to touch the Switcher addresses. */
if (index >= SWITCHER_PGD_INDEX) {
kill_guest(cpu, "attempt to access switcher pages");
index = 0;
}
#endif
/* Return a pointer index'th pgd entry for the i'th page table. */
return &cpu->lg->pgdirs[i].pgdir[index];
}
#ifdef CONFIG_X86_PAE
/* This routine then takes the PGD entry given above, which contains the
* address of the PMD page. It then returns a pointer to the PMD entry for the
* given address. */
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
unsigned int index = pmd_index(vaddr);
pmd_t *page;
/* We kill any Guest trying to touch the Switcher addresses. */
if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
index >= SWITCHER_PMD_INDEX) {
kill_guest(cpu, "attempt to access switcher pages");
index = 0;
}
/* You should never call this if the PGD entry wasn't valid */
BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
return &page[index];
}
#endif
/* This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page. It then returns a
 * pointer to the PTE entry for the given address. */
-static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
/* You should never call this if the PMD entry wasn't valid */
BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
/* You should never call this if the PGD entry wasn't valid */
BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif
return &page[pte_index(vaddr)];
}
@@ -101,10 +147,31 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}
-static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
#ifdef CONFIG_X86_PAE
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
{
unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
return gpage + pmd_index(vaddr) * sizeof(pmd_t);
}
#endif
static unsigned long gpte_addr(struct lg_cpu *cpu,
pgd_t gpgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
pmd_t gpmd;
#endif
unsigned long gpage;
BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
#ifdef CONFIG_X86_PAE
gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
#else
gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
#endif
return gpage + pte_index(vaddr) * sizeof(pte_t);
}
/*:*/
@@ -184,11 +251,20 @@ static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
-if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
(pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
kill_guest(cpu, "bad page directory entry");
}
#ifdef CONFIG_X86_PAE
static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
(pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
kill_guest(cpu, "bad page middle directory entry");
}
#endif
/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
@@ -207,6 +283,11 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
pte_t gpte;
pte_t *spte;
#ifdef CONFIG_X86_PAE
pmd_t *spmd;
pmd_t gpmd;
#endif
/* First step: get the top-level Guest page table entry. */
gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
/* Toplevel not present? We can't map it in. */
@@ -228,12 +309,40 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
check_gpgd(cpu, gpgd);
/* And we copy the flags to the shadow PGD entry. The page
 * number in the shadow PGD is the page we just allocated. */
-*spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd)));
}
#ifdef CONFIG_X86_PAE
gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
/* middle level not present? We can't map it in. */
if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
return false;
/* Now look at the matching shadow entry. */
spmd = spmd_addr(cpu, *spgd, vaddr);
if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
/* No shadow entry: allocate a new shadow PTE page. */
unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
/* This is not really the Guest's fault, but killing it is
* simple for this corner case. */
if (!ptepage) {
kill_guest(cpu, "out of memory allocating pte page");
return false;
}
/* We check that the Guest pmd is OK. */
check_gpmd(cpu, gpmd);
/* And we copy the flags to the shadow PMD entry. The page
* number in the shadow PMD is the page we just allocated. */
native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd)));
}
#endif
/* OK, now we look at the lower level in the Guest page table: keep its
 * address, because we might update it later. */
-gpte_ptr = gpte_addr(gpgd, vaddr);
gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
gpte = lgread(cpu, gpte_ptr, pte_t);
/* If this page isn't in the Guest page tables, we can't page it in. */
@@ -259,7 +368,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
gpte = pte_mkdirty(gpte);
/* Get the pointer to the shadow PTE entry we're going to set. */
-spte = spte_addr(*spgd, vaddr);
spte = spte_addr(cpu, *spgd, vaddr);
/* If there was a valid shadow PTE entry here before, we release it.
 * This can happen with a write to a previously read-only entry. */
release_pte(*spte);
@@ -301,14 +410,23 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
pgd_t *spgd;
unsigned long flags;
#ifdef CONFIG_X86_PAE
pmd_t *spmd;
#endif
/* Look at the current top level entry: is it present? */
spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
return false;
#ifdef CONFIG_X86_PAE
spmd = spmd_addr(cpu, *spgd, vaddr);
if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
return false;
#endif
/* Check the flags on the pte entry itself: it must be present and
 * writable. */
-flags = pte_flags(*(spte_addr(*spgd, vaddr)));
flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));
return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}
@@ -322,6 +440,41 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
kill_guest(cpu, "bad stack page %#lx", vaddr);
}
#ifdef CONFIG_X86_PAE
static void release_pmd(pmd_t *spmd)
{
/* If the entry's not present, there's nothing to release. */
if (pmd_flags(*spmd) & _PAGE_PRESENT) {
unsigned int i;
pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
/* For each entry in the page, we might need to release it. */
for (i = 0; i < PTRS_PER_PTE; i++)
release_pte(ptepage[i]);
/* Now we can free the page of PTEs */
free_page((long)ptepage);
/* And zero out the PMD entry so we never release it twice. */
native_set_pmd(spmd, __pmd(0));
}
}
static void release_pgd(pgd_t *spgd)
{
/* If the entry's not present, there's nothing to release. */
if (pgd_flags(*spgd) & _PAGE_PRESENT) {
unsigned int i;
pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
for (i = 0; i < PTRS_PER_PMD; i++)
release_pmd(&pmdpage[i]);
/* Now we can free the page of PMDs */
free_page((long)pmdpage);
/* And zero out the PGD entry so we never release it twice. */
set_pgd(spgd, __pgd(0));
}
}
#else /* !CONFIG_X86_PAE */
/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(pgd_t *spgd)
{
@@ -341,7 +494,7 @@ static void release_pgd(pgd_t *spgd)
*spgd = __pgd(0);
}
}
#endif
/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address. */
@@ -370,6 +523,9 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
pgd_t gpgd;
pte_t gpte;
#ifdef CONFIG_X86_PAE
pmd_t gpmd;
#endif
/* First step: get the top-level Guest page table entry. */
gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
/* Toplevel not present? We can't map it in. */
@@ -378,7 +534,13 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
return -1UL;
}
-gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
#ifdef CONFIG_X86_PAE
gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
kill_guest(cpu, "Bad address %#lx", vaddr);
#endif
gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
if (!(pte_flags(gpte) & _PAGE_PRESENT))
kill_guest(cpu, "Bad address %#lx", vaddr);
@@ -405,6 +567,9 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
int *blank_pgdir)
{
unsigned int next;
#ifdef CONFIG_X86_PAE
pmd_t *pmd_table;
#endif
/* We pick one entry at random to throw out. Choosing the Least
 * Recently Used might be better, but this is easy. */
@@ -416,11 +581,28 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
/* If the allocation fails, just keep using the one we have */
if (!cpu->lg->pgdirs[next].pgdir)
next = cpu->cpu_pgd;
-else
else {
#ifdef CONFIG_X86_PAE
/* In PAE mode, allocate a pmd page and populate the
* last pgd entry. */
pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
if (!pmd_table) {
free_page((long)cpu->lg->pgdirs[next].pgdir);
set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
next = cpu->cpu_pgd;
} else {
set_pgd(cpu->lg->pgdirs[next].pgdir +
SWITCHER_PGD_INDEX,
__pgd(__pa(pmd_table) | _PAGE_PRESENT));
/* This is a blank page, so there are no kernel
 * mappings: caller must map the stack! */
*blank_pgdir = 1;
}
#else
*blank_pgdir = 1;
#endif
}
}
/* Record which Guest toplevel this shadows. */
cpu->lg->pgdirs[next].gpgdir = gpgdir;
/* Release all the non-kernel mappings. */
@@ -460,10 +642,25 @@ static void release_all_pagetables(struct lguest *lg)
/* Every shadow pagetable this Guest has */
for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
-if (lg->pgdirs[i].pgdir)
if (lg->pgdirs[i].pgdir) {
#ifdef CONFIG_X86_PAE
pgd_t *spgd;
pmd_t *pmdpage;
unsigned int k;
/* Get the last pmd page. */
spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
/* And release the pmd entries of that pmd page,
* except for the switcher pmd. */
for (k = 0; k < SWITCHER_PMD_INDEX; k++)
release_pmd(&pmdpage[k]);
#endif
/* Every PGD entry except the Switcher at the top */
for (j = 0; j < SWITCHER_PGD_INDEX; j++)
release_pgd(lg->pgdirs[i].pgdir + j);
}
}
/* We also throw away everything when a Guest tells us it's changed a kernel
@@ -504,24 +701,37 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
{
/* Look up the matching shadow page directory entry. */
pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
#ifdef CONFIG_X86_PAE
pmd_t *spmd;
#endif
/* If the top level isn't present, there's no entry to update. */
if (pgd_flags(*spgd) & _PAGE_PRESENT) {
#ifdef CONFIG_X86_PAE
spmd = spmd_addr(cpu, *spgd, vaddr);
if (pmd_flags(*spmd) & _PAGE_PRESENT) {
#endif
/* Otherwise, we start by releasing
 * the existing entry. */
-pte_t *spte = spte_addr(*spgd, vaddr);
pte_t *spte = spte_addr(cpu, *spgd, vaddr);
release_pte(*spte);
/* If they're setting this entry as dirty or accessed,
 * we might as well put that entry they've given us
 * in now. This shaves 10% off a
 * copy-on-write micro-benchmark. */
if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
check_gpte(cpu, gpte);
-*spte = gpte_to_spte(cpu, gpte,
-pte_flags(gpte) & _PAGE_DIRTY);
native_set_pte(spte,
gpte_to_spte(cpu, gpte,
pte_flags(gpte) & _PAGE_DIRTY));
} else
/* Otherwise kill it and we can demand_page()
 * it in later. */
-*spte = __pte(0);
native_set_pte(spte, __pte(0));
#ifdef CONFIG_X86_PAE
}
#endif
}
}
@@ -572,8 +782,6 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
int pgdir;
-/* The kernel seems to try to initialize this early on: we ignore its
- * attempts to map over the Switcher. */
if (idx >= SWITCHER_PGD_INDEX)
return;
@@ -583,6 +791,12 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
/* ... throw it away. */
release_pgd(lg->pgdirs[pgdir].pgdir + idx);
}
#ifdef CONFIG_X86_PAE
void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
{
guest_pagetable_clear_all(&lg->cpus[0]);
}
#endif
/* Once we know how much memory we have we can construct simple identity
 * (which set virtual == physical) and linear mappings
@@ -596,8 +810,16 @@ static unsigned long setup_pagetables(struct lguest *lg,
{
pgd_t __user *pgdir;
pte_t __user *linear;
-unsigned int mapped_pages, i, linear_pages, phys_linear;
unsigned long mem_base = (unsigned long)lg->mem_base;
unsigned int mapped_pages, i, linear_pages;
#ifdef CONFIG_X86_PAE
pmd_t __user *pmds;
unsigned int j;
pgd_t pgd;
pmd_t pmd;
#else
unsigned int phys_linear;
#endif
/* We have mapped_pages frames to map, so we need
 * linear_pages page tables to map them. */
@@ -610,6 +832,9 @@ static unsigned long setup_pagetables(struct lguest *lg,
/* Now we use the next linear_pages pages as pte pages */
linear = (void *)pgdir - linear_pages * PAGE_SIZE;
#ifdef CONFIG_X86_PAE
pmds = (void *)linear - PAGE_SIZE;
#endif
/* Linear mapping is easy: put every page's address into the
 * mapping in order. */
for (i = 0; i < mapped_pages; i++) {
@@ -621,6 +846,22 @@ static unsigned long setup_pagetables(struct lguest *lg,
/* The top level points to the linear page table pages above.
 * We setup the identity and linear mappings here. */
#ifdef CONFIG_X86_PAE
for (i = 0, j; i < mapped_pages && j < PTRS_PER_PMD;
i += PTRS_PER_PTE, j++) {
native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i)
- mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
return -EFAULT;
}
set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT));
if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
return -EFAULT;
if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0)
return -EFAULT;
#else
phys_linear = (unsigned long)linear - mem_base;
for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
pgd_t pgd;
@@ -633,6 +874,7 @@ static unsigned long setup_pagetables(struct lguest *lg,
&pgd, sizeof(pgd)))
return -EFAULT;
}
#endif
/* We return the top level (guest-physical) address: remember where
 * this is. */
@@ -648,7 +890,10 @@ int init_guest_pagetable(struct lguest *lg)
u64 mem;
u32 initrd_size;
struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
#ifdef CONFIG_X86_PAE
pgd_t *pgd;
pmd_t *pmd_table;
#endif
/* Get the Guest memory size and the ramdisk size from the boot header
 * located at lg->mem_base (Guest address 0). */
if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
@@ -663,6 +908,15 @@ int init_guest_pagetable(struct lguest *lg)
lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
if (!lg->pgdirs[0].pgdir)
return -ENOMEM;
#ifdef CONFIG_X86_PAE
pgd = lg->pgdirs[0].pgdir;
pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
if (!pmd_table)
return -ENOMEM;
set_pgd(pgd + SWITCHER_PGD_INDEX,
__pgd(__pa(pmd_table) | _PAGE_PRESENT));
#endif
lg->cpus[0].cpu_pgd = 0;
return 0;
}
@@ -673,16 +927,23 @@ void page_table_guest_data_init(struct lg_cpu *cpu)
/* We get the kernel address: above this is all kernel memory. */
if (get_user(cpu->lg->kernel_address,
&cpu->lg->lguest_data->kernel_address)
-/* We tell the Guest that it can't use the top 4MB of virtual
- * addresses used by the Switcher. */
-|| put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
/* We tell the Guest that it can't use the top 2 or 4 MB
 * of virtual addresses used by the Switcher. */
|| put_user(RESERVE_MEM * 1024 * 1024,
&cpu->lg->lguest_data->reserve_mem)
|| put_user(cpu->lg->pgdirs[0].gpgdir,
&cpu->lg->lguest_data->pgdir))
kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
/* In flush_user_mappings() we loop from 0 to
 * "pgd_index(lg->kernel_address)". This assumes it won't hit the
 * Switcher mappings, so check that now. */
#ifdef CONFIG_X86_PAE
if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
#else
if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
#endif
kill_guest(cpu, "bad kernel address %#lx", kill_guest(cpu, "bad kernel address %#lx",
cpu->lg->kernel_address); cpu->lg->kernel_address);
} }
...@@ -708,16 +969,30 @@ void free_guest_pagetable(struct lguest *lg) ...@@ -708,16 +969,30 @@ void free_guest_pagetable(struct lguest *lg)
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{ {
pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
pgd_t switcher_pgd;
pte_t regs_pte; pte_t regs_pte;
unsigned long pfn; unsigned long pfn;
#ifdef CONFIG_X86_PAE
pmd_t switcher_pmd;
pmd_t *pmd_table;
native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >>
PAGE_SHIFT, PAGE_KERNEL_EXEC));
pmd_table = __va(pgd_pfn(cpu->lg->
pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
<< PAGE_SHIFT);
native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
#else
pgd_t switcher_pgd;
/* Make the last PGD entry for this Guest point to the Switcher's PTE
 * page for this CPU (with appropriate flags). */
switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);
cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
#endif
/* We also change the Switcher PTE page. When we're running the Guest,
 * we want the Guest's "regs" page to appear where the first Switcher
 * page for this CPU is. This is an optimization: when the Switcher
......