Commit ee5c2ab0 authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  xen: don't drop NX bit
  xen: mask unwanted pte bits in __supported_pte_mask
  xen: Use wmb instead of rmb in xen_evtchn_do_upcall().
  x86: fix NULL pointer deref in __switch_to
parents f6837bfa ebb9cfe2
arch/x86/xen/enlighten.c
@@ -1228,6 +1228,11 @@ asmlinkage void __init xen_start_kernel(void)
 	if (xen_feature(XENFEAT_supervisor_mode_kernel))
 		pv_info.kernel_rpl = 0;
 
+	/* Prevent unwanted bits from being set in PTEs. */
+	__supported_pte_mask &= ~_PAGE_GLOBAL;
+	if (!is_initial_xendomain())
+		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
+
 	/* set the limit of our address space */
 	xen_reserve_top();
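For readers unfamiliar with the mechanism: __supported_pte_mask is ANDed into ptes as the pte-construction helpers build them, so clearing a bit here keeps it out of every page table entry from then on. Below is a minimal user-space model of that filtering; the bit values and the make_pte() helper are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values only; the real ones live in the x86 pgtable headers. */
#define _PAGE_PRESENT	0x001ULL
#define _PAGE_PWT	0x008ULL
#define _PAGE_PCD	0x010ULL
#define _PAGE_GLOBAL	0x100ULL

typedef uint64_t pteval_t;

static pteval_t supported_pte_mask = ~0ULL;	/* everything allowed by default */

/* Every constructed pte is filtered through the mask, so a bit cleared
 * in the mask can never reach the page tables. */
static pteval_t make_pte(pteval_t paddr, pteval_t prot)
{
	return (paddr | prot) & supported_pte_mask;
}

int main(void)
{
	/* What the Xen setup code above does: drop bits the hypervisor
	 * refuses to validate for this kind of domain. */
	supported_pte_mask &= ~_PAGE_GLOBAL;
	supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	pteval_t pte = make_pte(0x1000,
				_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_PCD);
	printf("pte = %#llx\n", (unsigned long long)pte);	/* prints 0x1001 */
	return 0;
}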
arch/x86/xen/mmu.c
@@ -179,48 +179,54 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 	preempt_enable();
 }
 
-pteval_t xen_pte_val(pte_t pte)
+/* Assume pteval_t is equivalent to all the other *val_t types. */
+
+static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
-	pteval_t ret = pte.pte;
-
-	if (ret & _PAGE_PRESENT)
-		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
-
-	return ret;
+	if (val & _PAGE_PRESENT) {
+		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
+		pteval_t flags = val & ~PTE_MASK;
+		val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+	}
+
+	return val;
+}
+
+static pteval_t pte_pfn_to_mfn(pteval_t val)
+{
+	if (val & _PAGE_PRESENT) {
+		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
+		pteval_t flags = val & ~PTE_MASK;
+		val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+	}
+
+	return val;
+}
+
+pteval_t xen_pte_val(pte_t pte)
+{
+	return pte_mfn_to_pfn(pte.pte);
 }
 
 pgdval_t xen_pgd_val(pgd_t pgd)
 {
-	pgdval_t ret = pgd.pgd;
-	if (ret & _PAGE_PRESENT)
-		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
-	return ret;
+	return pte_mfn_to_pfn(pgd.pgd);
 }
 
 pte_t xen_make_pte(pteval_t pte)
 {
-	if (pte & _PAGE_PRESENT) {
-		pte = phys_to_machine(XPADDR(pte)).maddr;
-		pte &= ~(_PAGE_PCD | _PAGE_PWT);
-	}
-
-	return (pte_t){ .pte = pte };
+	pte = pte_pfn_to_mfn(pte);
+	return native_make_pte(pte);
 }
 
 pgd_t xen_make_pgd(pgdval_t pgd)
 {
-	if (pgd & _PAGE_PRESENT)
-		pgd = phys_to_machine(XPADDR(pgd)).maddr;
-
-	return (pgd_t){ pgd };
+	pgd = pte_pfn_to_mfn(pgd);
+	return native_make_pgd(pgd);
 }
 
 pmdval_t xen_pmd_val(pmd_t pmd)
 {
-	pmdval_t ret = native_pmd_val(pmd);
-	if (ret & _PAGE_PRESENT)
-		ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
-	return ret;
+	return pte_mfn_to_pfn(pmd.pmd);
 }
 
 #ifdef CONFIG_X86_PAE
 void xen_set_pud(pud_t *ptr, pud_t val)
@@ -267,9 +273,7 @@ void xen_pmd_clear(pmd_t *pmdp)
 
 pmd_t xen_make_pmd(pmdval_t pmd)
 {
-	if (pmd & _PAGE_PRESENT)
-		pmd = phys_to_machine(XPADDR(pmd)).maddr;
-
+	pmd = pte_pfn_to_mfn(pmd);
 	return native_make_pmd(pmd);
 }
 #else  /* !PAE */
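A side note on how this refactoring ties into the "don't drop NX bit" fix: the old xen_pte_val() rebuilt the value as machine_to_phys(...).paddr | _PAGE_PRESENT, discarding every other flag bit, and the old xen_make_pte() stripped _PAGE_PCD/_PAGE_PWT by hand; the new helpers only swap the frame number and leave the flags untouched (unwanted bits are now filtered centrally via __supported_pte_mask). Below is a stand-alone model of that split-translate-recombine step; the PTE_MASK value and the XOR "translation" are stand-ins for the real machine-to-phys tables.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
/* Illustrative: the frame-number bits of a pte; everything else is flags. */
#define PTE_MASK	0x000ffffffffff000ULL
#define _PAGE_PRESENT	0x001ULL

typedef uint64_t pteval_t;

/* Toy frame-number translation standing in for the machine<->pseudo-physical
 * lookup tables; any invertible mapping works for demonstration. */
static unsigned long mfn_to_pfn(unsigned long mfn) { return mfn ^ 0x40000; }
static unsigned long pfn_to_mfn(unsigned long pfn) { return pfn ^ 0x40000; }

/* Same shape as the new kernel helpers: split off the frame number,
 * translate it, and keep every flag bit exactly as it was. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}
	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}
	return val;
}

int main(void)
{
	pteval_t machine = (0x123ULL << PAGE_SHIFT) | _PAGE_PRESENT;
	pteval_t pseudo  = pte_mfn_to_pfn(machine);

	/* Round-tripping restores the original value, flags intact. */
	printf("machine %#llx -> pseudo %#llx -> back %#llx\n",
	       (unsigned long long)machine,
	       (unsigned long long)pseudo,
	       (unsigned long long)pte_pfn_to_mfn(pseudo));
	return 0;
}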
arch/x86/xen/xen-head.S
@@ -17,7 +17,7 @@ ENTRY(startup_xen)
 
 	__FINIT
 
-.pushsection .bss.page_aligned
+.pushsection .text
 	.align PAGE_SIZE_asm
 ENTRY(hypercall_page)
 	.skip 0x1000
drivers/lguest/x86/core.c
@@ -176,7 +176,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	 * we set it now, so we can trap and pass that trap to the Guest if it
 	 * uses the FPU. */
 	if (cpu->ts)
-		lguest_set_ts();
+		unlazy_fpu(current);
 
 	/* SYSENTER is an optimized way of doing system calls. We can't allow
 	 * it because it always jumps to privilege level 0. A normal Guest
@@ -196,6 +196,10 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	 * trap made the switcher code come back, and an error code which some
 	 * traps set. */
 
+	/* Restore SYSENTER if it's supposed to be on. */
+	if (boot_cpu_has(X86_FEATURE_SEP))
+		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+
 	/* If the Guest page faulted, then the cr2 register will tell us the
 	 * bad virtual address. We have to grab this now, because once we
 	 * re-enable interrupts an interrupt could fault and thus overwrite
@@ -203,13 +207,12 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	if (cpu->regs->trapnum == 14)
 		cpu->arch.last_pagefault = read_cr2();
 	/* Similarly, if we took a trap because the Guest used the FPU,
-	 * we have to restore the FPU it expects to see. */
+	 * we have to restore the FPU it expects to see.
+	 * math_state_restore() may sleep and we may even move off to
+	 * a different CPU. So all the critical stuff should be done
+	 * before this. */
 	else if (cpu->regs->trapnum == 7)
 		math_state_restore();
-
-	/* Restore SYSENTER if it's supposed to be on. */
-	if (boot_cpu_has(X86_FEATURE_SEP))
-		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
 }
 
 /*H:130 Now we've examined the hypercall code; our Guest can make requests.
drivers/xen/events.c
@@ -529,7 +529,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
 		/* Clear master flag /before/ clearing selector flag. */
-		rmb();
+		wmb();
 #endif
 		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
 		while (pending_words != 0) {
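The one-character change matters because both sides of the required ordering are stores: the handler clears the per-vCPU master flag and then clears (via the xchg) the selector word, so "clear master flag before clearing selector flag" needs a store-store barrier, which is wmb(); rmb() only orders loads. Below is a user-space sketch of the same pattern using C11 atomics; the struct and field names loosely mirror the Xen vcpu_info interface but are simplified here.

#include <stdatomic.h>
#include <stdio.h>

/* Simplified stand-in for the shared vcpu_info structure. */
struct vcpu_info_model {
	atomic_int  evtchn_upcall_pending;	/* master "events pending" flag */
	atomic_uint evtchn_pending_sel;		/* selector of pending event words */
};

static unsigned int drain_events(struct vcpu_info_model *v)
{
	/* Clear the master flag first... */
	atomic_store_explicit(&v->evtchn_upcall_pending, 0,
			      memory_order_relaxed);

	/* ...and make that store visible before the exchange below; this
	 * fence plays the role of the kernel's wmb(). If the two stores
	 * could be reordered, an event arriving in the window would set a
	 * selector bit after our snapshot while our delayed store wipes
	 * the master flag that should advertise it, so nothing would
	 * trigger a new upcall to process it. */
	atomic_thread_fence(memory_order_release);

	/* Snapshot-and-clear the selector; events delivered after this
	 * point set both flags again and cause a fresh upcall. */
	return atomic_exchange_explicit(&v->evtchn_pending_sel, 0,
					memory_order_acq_rel);
}

int main(void)
{
	struct vcpu_info_model v = { 1, 0x5 };

	printf("pending word groups: %#x\n", drain_events(&v));
	return 0;
}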