Commit ee4a9251 authored by Linus Torvalds

Merge tag 'x86-paravirt-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 paravirt cleanup from Ingo Molnar:
 "Clean up the paravirt code after the removal of 32-bit Xen PV support"

* tag 'x86-paravirt-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/paravirt: Avoid needless paravirt step clearing page table entries
  x86/paravirt: Remove set_pte_at() pv-op
  x86/entry/32: Simplify CONFIG_XEN_PV build dependency
  x86/paravirt: Use CONFIG_PARAVIRT_XXL instead of CONFIG_PARAVIRT
  x86/paravirt: Clean up paravirt macros
  x86/paravirt: Remove 32-bit support from CONFIG_PARAVIRT_XXL
parents ad884ff3 7c9f80cb
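Background for the hunks below: CONFIG_PARAVIRT provides the lightweight hooks that any paravirtualized guest (e.g. KVM) may use, while CONFIG_PARAVIRT_XXL carries the full set of privileged-instruction and page-table hooks that only Xen PV guests need. With 32-bit Xen PV gone, several guards tighten from CONFIG_PARAVIRT to CONFIG_PARAVIRT_XXL, and the 32-bit-only pv machinery disappears. A minimal C sketch of the pv_ops pattern being cleaned up — names here are illustrative only; the kernel uses PVOP_* macros plus boot-time patching, not this exact code:

	#include <stdio.h>

	struct pv_cpu_ops_sketch {
		void (*write_cr3)(unsigned long val);	/* a hypervisor may override this */
	};

	static void native_write_cr3_sketch(unsigned long val)
	{
		printf("native path, as if: mov %%rdi, %%cr3 (val=%lx)\n", val);
	}

	/* Defaults are the native implementations; Xen replaces them at boot. */
	static struct pv_cpu_ops_sketch pv_cpu_ops_sketch = {
		.write_cr3 = native_write_cr3_sketch,
	};

	int main(void)
	{
		pv_cpu_ops_sketch.write_cr3(0x1000);	/* indirect call through the op table */
		return 0;
	}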
@@ -46,13 +46,13 @@
 .code64
 .section .entry.text, "ax"
 
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 SYM_CODE_START(native_usergs_sysret64)
 	UNWIND_HINT_EMPTY
 	swapgs
 	sysretq
 SYM_CODE_END(native_usergs_sysret64)
-#endif /* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_XXL */
 
 /*
  * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
...
@@ -14,6 +14,7 @@
 #undef CONFIG_ILLEGAL_POINTER_VALUE
 #undef CONFIG_SPARSEMEM_VMEMMAP
 #undef CONFIG_NR_CPUS
+#undef CONFIG_PARAVIRT_XXL
 
 #define CONFIG_X86_32 1
 #define CONFIG_PGTABLE_LEVELS 2
...
@@ -99,7 +99,7 @@ enum fixed_addresses {
 	FIX_PCIE_MCFG,
 #endif
 #endif
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 	FIX_PARAVIRT_BOOTMAP,
 #endif
 #ifdef CONFIG_X86_INTEL_MID
...
@@ -547,7 +547,7 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC,	exc_machine_check);
 
 /* NMI */
 DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,	exc_nmi);
-#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64)
+#ifdef CONFIG_XEN_PV
 DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,	xenpv_exc_nmi);
 #endif
 
@@ -557,7 +557,7 @@ DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB,	exc_debug);
 #else
 DECLARE_IDTENTRY_RAW(X86_TRAP_DB,	exc_debug);
 #endif
-#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64)
+#ifdef CONFIG_XEN_PV
 DECLARE_IDTENTRY_RAW(X86_TRAP_DB,	xenpv_exc_debug);
 #endif
...
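The two idtentry hunks drop the `defined(CONFIG_X86_64)` half of the test: with 32-bit support removed, the XEN_PV Kconfig option can only be selected on 64-bit builds, so the extra check is dead weight. A hedged compile-time sketch of the new invariant (the #error text is illustrative, not in the kernel):

	#if defined(CONFIG_XEN_PV) && !defined(CONFIG_X86_64)
	# error "Xen PV guest support is 64-bit only after this series"
	#endif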
@@ -160,8 +160,6 @@ static inline void wbinvd(void)
 	PVOP_VCALL0(cpu.wbinvd);
 }
 
-#define get_kernel_rpl()  (pv_info.kernel_rpl)
-
 static inline u64 paravirt_read_msr(unsigned msr)
 {
 	return PVOP_CALL1(u64, cpu.read_msr, msr);
@@ -277,12 +275,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu)
 	PVOP_VCALL2(cpu.load_tls, t, cpu);
 }
 
-#ifdef CONFIG_X86_64
 static inline void load_gs_index(unsigned int gs)
 {
 	PVOP_VCALL1(cpu.load_gs_index, gs);
 }
-#endif
 
 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
 				   const void *desc)
@@ -375,52 +371,22 @@ static inline void paravirt_release_p4d(unsigned long pfn)
 
 static inline pte_t __pte(pteval_t val)
 {
-	pteval_t ret;
-
-	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);
-
-	return (pte_t) { .pte = ret };
+	return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
 }
 
 static inline pteval_t pte_val(pte_t pte)
 {
-	pteval_t ret;
-
-	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
-				   pte.pte, (u64)pte.pte >> 32);
-	else
-		ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
-
-	return ret;
+	return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
 }
 
 static inline pgd_t __pgd(pgdval_t val)
 {
-	pgdval_t ret;
-
-	if (sizeof(pgdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);
-
-	return (pgd_t) { ret };
+	return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
 }
 
 static inline pgdval_t pgd_val(pgd_t pgd)
 {
-	pgdval_t ret;
-
-	if (sizeof(pgdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
-				   pgd.pgd, (u64)pgd.pgd >> 32);
-	else
-		ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
-
-	return ret;
+	return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
 }
 
 #define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
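Background for the hunk above: `sizeof(pteval_t) > sizeof(long)` was a compile-time test for 32-bit PAE, where a 64-bit pte had to be split across two 32-bit argument registers (hence PVOP_CALLEE2 with the high word passed separately). On x86-64, pteval_t and long are both 8 bytes, so only the single-argument path could ever be taken; the diff simply hardcodes it. A standalone sketch of the old dispatch — the typedef is an assumption for illustration:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pteval_t;	/* 64-bit pte value, as on PAE or x86-64 */

	int main(void)
	{
		/* On a 32-bit PAE build (long == 4 bytes) this prints 1 and the
		 * pte had to travel as two register halves; on x86-64 it prints
		 * 0, which is why the two-word path was dead code there. */
		printf("%d\n", sizeof(pteval_t) > sizeof(long));
		return 0;
	}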
@@ -438,78 +404,34 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned
 			   pte_t *ptep, pte_t old_pte, pte_t pte)
 {
-	if (sizeof(pteval_t) > sizeof(long))
-		/* 5 arg words */
-		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
-	else
-		PVOP_VCALL4(mmu.ptep_modify_prot_commit,
-			    vma, addr, ptep, pte.pte);
+	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
 }
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	if (sizeof(pteval_t) > sizeof(long))
-		PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
-	else
-		PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
-{
-	if (sizeof(pteval_t) > sizeof(long))
-		/* 5 arg words */
-		pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
-	else
-		PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
+	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
 }
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	pmdval_t val = native_pmd_val(pmd);
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
-	else
-		PVOP_VCALL2(mmu.set_pmd, pmdp, val);
+	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 3
 static inline pmd_t __pmd(pmdval_t val)
 {
-	pmdval_t ret;
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);
-
-	return (pmd_t) { ret };
+	return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
 }
 
 static inline pmdval_t pmd_val(pmd_t pmd)
 {
-	pmdval_t ret;
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
-				   pmd.pmd, (u64)pmd.pmd >> 32);
-	else
-		ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
-
-	return ret;
+	return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
 }
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-	pudval_t val = native_pud_val(pud);
-
-	if (sizeof(pudval_t) > sizeof(long))
-		PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
-	else
-		PVOP_VCALL2(mmu.set_pud, pudp, val);
+	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 4
 static inline pud_t __pud(pudval_t val)
 {
 	pudval_t ret;
@@ -526,7 +448,7 @@ static inline pudval_t pud_val(pud_t pud)
 
 static inline void pud_clear(pud_t *pudp)
 {
-	set_pud(pudp, __pud(0));
+	set_pud(pudp, native_make_pud(0));
 }
 
 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
@@ -563,40 +485,17 @@ static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
 	} while (0)
 
 #define pgd_clear(pgdp) do {						\
 		if (pgtable_l5_enabled())				\
-			set_pgd(pgdp, __pgd(0));			\
+			set_pgd(pgdp, native_make_pgd(0));		\
 	} while (0)
 
 #endif  /* CONFIG_PGTABLE_LEVELS == 5 */
 
 static inline void p4d_clear(p4d_t *p4dp)
 {
-	set_p4d(p4dp, __p4d(0));
+	set_p4d(p4dp, native_make_p4d(0));
 }
 
-#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
-
-#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
-
-#ifdef CONFIG_X86_PAE
-/* Special-case pte-setting operations for PAE, which can't update a
-   64-bit pte atomically */
-static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
-{
-	PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
-			     pte_t *ptep)
-{
-	PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-	PVOP_VCALL1(mmu.pmd_clear, pmdp);
-}
-#else  /* !CONFIG_X86_PAE */
 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	set_pte(ptep, pte);
@@ -605,14 +504,13 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t *ptep)
 {
-	set_pte_at(mm, addr, ptep, __pte(0));
+	set_pte(ptep, native_make_pte(0));
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-	set_pmd(pmdp, __pmd(0));
+	set_pmd(pmdp, native_make_pmd(0));
 }
-#endif	/* CONFIG_X86_PAE */
 
 #define  __HAVE_ARCH_START_CONTEXT_SWITCH
 static inline void arch_start_context_switch(struct task_struct *prev)
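The hunk above is the "avoid needless paravirt step" part of the series: pte_clear(), pmd_clear() and friends used to build their zero entry via __pte(0)/__pmd(0), which bounces through the mmu.make_* pv-ops just to produce the constant 0. Using the native constructors skips that indirection, while the actual write still goes through the (patchable) set_* op. A simplified sketch of such a native constructor, close to but not verbatim the kernel's pgtable types:

	typedef unsigned long long pteval_t;
	typedef struct { pteval_t pte; } pte_t;

	/* Plain constructor: no pv_ops call, trivially folded to a constant
	 * when val is 0, which is all the *_clear() helpers need. */
	static inline pte_t native_make_pte(pteval_t val)
	{
		return (pte_t) { .pte = val };
	}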
@@ -682,16 +580,9 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 #endif /* SMP && PARAVIRT_SPINLOCKS */
 
 #ifdef CONFIG_X86_32
-#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
-#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
-
 /* save and restore all caller-save registers, except return value */
 #define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
 #define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"
-
-#define PV_FLAGS_ARG "0"
-#define PV_EXTRA_CLOBBERS
-#define PV_VEXTRA_CLOBBERS
-
 #else
 /* save and restore all caller-save registers, except return value */
 #define PV_SAVE_ALL_CALLER_REGS					\
@@ -712,14 +603,6 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 	"pop %rsi;"						\
 	"pop %rdx;"						\
 	"pop %rcx;"
-
-/* We save some registers, but all of them, that's too much. We clobber all
- * caller saved registers but the argument parameter */
-#define PV_SAVE_REGS "pushq %%rdi;"
-#define PV_RESTORE_REGS "popq %%rdi;"
-#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
-#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
-#define PV_FLAGS_ARG "D"
 #endif
 
 /*
...
@@ -68,12 +68,7 @@ struct paravirt_callee_save {
 /* general info */
 struct pv_info {
 #ifdef CONFIG_PARAVIRT_XXL
-	unsigned int kernel_rpl;
-	int shared_kernel_pmd;
-
-#ifdef CONFIG_X86_64
 	u16 extra_user_64bit_cs;  /* __USER_CS if none */
-#endif
 #endif
 
 	const char *name;
@@ -126,9 +121,7 @@ struct pv_cpu_ops {
 	void (*set_ldt)(const void *desc, unsigned entries);
 	unsigned long (*store_tr)(void);
 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
-#ifdef CONFIG_X86_64
 	void (*load_gs_index)(unsigned int idx);
-#endif
 	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
 				const void *desc);
 	void (*write_gdt_entry)(struct desc_struct *,
@@ -249,8 +242,6 @@ struct pv_mmu_ops {
 
 	/* Pagetable manipulation functions */
 	void (*set_pte)(pte_t *ptep, pte_t pteval);
-	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
-			   pte_t *ptep, pte_t pteval);
 	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
 
 	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
@@ -264,21 +255,11 @@ struct pv_mmu_ops {
 	struct paravirt_callee_save pgd_val;
 	struct paravirt_callee_save make_pgd;
 
-#if CONFIG_PGTABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
-	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
-			  pte_t *ptep);
-	void (*pmd_clear)(pmd_t *pmdp);
-
-#endif	/* CONFIG_X86_PAE */
-
 	void (*set_pud)(pud_t *pudp, pud_t pudval);
 
 	struct paravirt_callee_save pmd_val;
 	struct paravirt_callee_save make_pmd;
 
-#if CONFIG_PGTABLE_LEVELS >= 4
 	struct paravirt_callee_save pud_val;
 	struct paravirt_callee_save make_pud;
 
@@ -291,10 +272,6 @@ struct pv_mmu_ops {
 	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
 #endif	/* CONFIG_PGTABLE_LEVELS >= 5 */
 
-#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */
-#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
-
 	struct pv_lazy_ops lazy_mode;
 
 	/* dom0 ops */
...
@@ -20,12 +20,7 @@ typedef union {
 } pte_t;
 #endif	/* !__ASSEMBLY__ */
 
-#ifdef CONFIG_PARAVIRT_XXL
-#define SHARED_KERNEL_PMD	((!static_cpu_has(X86_FEATURE_PTI) &&	\
-				 (pv_info.shared_kernel_pmd)))
-#else
 #define SHARED_KERNEL_PMD	(!static_cpu_has(X86_FEATURE_PTI))
-#endif
 
 #define ARCH_PAGE_TABLE_SYNC_MASK	(SHARED_KERNEL_PMD ? 0 : PGTBL_PMD_MODIFIED)
...
@@ -63,7 +63,6 @@ extern pmdval_t early_pmd_flags;
 #include <asm/paravirt.h>
 #else  /* !CONFIG_PARAVIRT_XXL */
 #define set_pte(ptep, pte)		native_set_pte(ptep, pte)
-#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
 
 #define set_pte_atomic(ptep, pte)					\
 	native_set_pte_atomic(ptep, pte)
@@ -1033,10 +1032,10 @@ static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
 	return res;
 }
 
-static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
-				     pte_t *ptep , pte_t pte)
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pte)
 {
-	native_set_pte(ptep, pte);
+	set_pte(ptep, pte);
 }
 
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
...
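Net effect of the two hunks above: set_pte_at() stops being a per-hypervisor hook and becomes one generic inline that discards mm/addr and forwards to set_pte(), which remains paravirt-patchable; callers are untouched. Restated as plain C under simplified types — a sketch, not the verbatim kernel code:

	struct mm_struct;
	typedef struct { unsigned long long pte; } pte_t;

	extern void set_pte(pte_t *ptep, pte_t pte);	/* still a pv-op under XXL */

	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep, pte_t pte)
	{
		(void)mm;	/* no remaining implementation needed these, */
		(void)addr;	/* so the extra arguments are simply dropped  */
		set_pte(ptep, pte);
	}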
@@ -54,7 +54,7 @@
 #endif
 
 #ifdef CONFIG_X86_64
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 /* Paravirtualized systems may not have PSE or PGE available */
 #define NEED_PSE	0
 #define NEED_PGE	0
...
@@ -222,10 +222,6 @@
 
 #endif
 
-#ifndef CONFIG_PARAVIRT_XXL
-# define get_kernel_rpl()		0
-#endif
-
 #define IDT_ENTRIES			256
 #define NUM_EXCEPTION_VECTORS		32
...
@@ -1468,15 +1468,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
 	 * ESPFIX issue, we can change this.
 	 */
 #ifdef CONFIG_X86_32
-# ifdef CONFIG_PARAVIRT_XXL
-	do {
-		extern void native_iret(void);
-		if (pv_ops.cpu.iret == native_iret)
-			set_cpu_bug(c, X86_BUG_ESPFIX);
-	} while (0);
-# else
 	set_cpu_bug(c, X86_BUG_ESPFIX);
-# endif
 #endif
 }
...
@@ -776,7 +776,6 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
 	/* fixup registers */
 	regs->cs = __KERNEL_CS;
 #ifdef CONFIG_X86_32
-	regs->cs |= get_kernel_rpl();
 	regs->gs = 0;
 #endif
 	regs->ip = (unsigned long)&kretprobe_trampoline;
...
@@ -182,7 +182,6 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 	/* Save skipped registers */
 	regs->cs = __KERNEL_CS;
 #ifdef CONFIG_X86_32
-	regs->cs |= get_kernel_rpl();
 	regs->gs = 0;
 #endif
 	regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
...
@@ -263,13 +263,8 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 struct pv_info pv_info = {
 	.name = "bare hardware",
 #ifdef CONFIG_PARAVIRT_XXL
-	.kernel_rpl = 0,
-	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */
-
-#ifdef CONFIG_X86_64
 	.extra_user_64bit_cs = __USER_CS,
 #endif
-#endif
 };
 
 /* 64-bit pagetable entries */
@@ -305,9 +300,7 @@ struct paravirt_patch_template pv_ops = {
 	.cpu.load_idt = native_load_idt,
 	.cpu.store_tr = native_store_tr,
 	.cpu.load_tls = native_load_tls,
-#ifdef CONFIG_X86_64
 	.cpu.load_gs_index = native_load_gs_index,
-#endif
 	.cpu.write_ldt_entry = native_write_ldt_entry,
 	.cpu.write_gdt_entry = native_write_gdt_entry,
 	.cpu.write_idt_entry = native_write_idt_entry,
@@ -317,9 +310,7 @@ struct paravirt_patch_template pv_ops = {
 
 	.cpu.load_sp0 = native_load_sp0,
 
-#ifdef CONFIG_X86_64
 	.cpu.usergs_sysret64 = native_usergs_sysret64,
-#endif
 	.cpu.iret = native_iret,
 	.cpu.swapgs = native_swapgs,
@@ -369,24 +360,16 @@ struct paravirt_patch_template pv_ops = {
 	.mmu.release_p4d = paravirt_nop,
 
 	.mmu.set_pte = native_set_pte,
-	.mmu.set_pte_at = native_set_pte_at,
 	.mmu.set_pmd = native_set_pmd,
 
 	.mmu.ptep_modify_prot_start = __ptep_modify_prot_start,
 	.mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
-#if CONFIG_PGTABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-	.mmu.set_pte_atomic = native_set_pte_atomic,
-	.mmu.pte_clear = native_pte_clear,
-	.mmu.pmd_clear = native_pmd_clear,
-#endif
 	.mmu.set_pud = native_set_pud,
 
 	.mmu.pmd_val = PTE_IDENT,
 	.mmu.make_pmd = PTE_IDENT,
 
-#if CONFIG_PGTABLE_LEVELS >= 4
 	.mmu.pud_val = PTE_IDENT,
 	.mmu.make_pud = PTE_IDENT,
 
@@ -398,8 +381,6 @@ struct paravirt_patch_template pv_ops = {
 	.mmu.set_pgd = native_set_pgd,
 #endif /* CONFIG_PGTABLE_LEVELS >= 5 */
 
-#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
-#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
-
 	.mmu.pte_val = PTE_IDENT,
 	.mmu.pgd_val = PTE_IDENT,
...
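A note on the PTE_IDENT entries above: on bare hardware, the pte "conversion" ops (pte_val, make_pte, and friends) are identity transforms, since there is no hypervisor ABI to translate to; the kernel spells this _paravirt_ident_64 wrapped in __PV_IS_CALLEE_SAVE. Conceptually, and only as an illustrative sketch:

	typedef unsigned long long u64;

	/* Identity transform: what "converting" a pte costs on bare metal.
	 * The boot-time patcher can collapse the call entirely (see the
	 * mov64 bytes in the next file). */
	static u64 paravirt_ident_64_sketch(u64 val)
	{
		return val;
	}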
@@ -26,14 +26,10 @@ struct patch_xxl {
 	const unsigned char	mmu_read_cr3[3];
 	const unsigned char	mmu_write_cr3[3];
 	const unsigned char	irq_restore_fl[2];
-# ifdef CONFIG_X86_64
 	const unsigned char	cpu_wbinvd[2];
 	const unsigned char	cpu_usergs_sysret64[6];
 	const unsigned char	cpu_swapgs[3];
 	const unsigned char	mov64[3];
-# else
-	const unsigned char	cpu_iret[1];
-# endif
 };
 
 static const struct patch_xxl patch_data_xxl = {
@@ -42,7 +38,6 @@ static const struct patch_xxl patch_data_xxl = {
 	.irq_save_fl	= { 0x9c, 0x58 },	// pushf; pop %[re]ax
 	.mmu_read_cr2	= { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
 	.mmu_read_cr3	= { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
-# ifdef CONFIG_X86_64
 	.mmu_write_cr3	= { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
 	.irq_restore_fl	= { 0x57, 0x9d },	// push %rdi; popfq
 	.cpu_wbinvd	= { 0x0f, 0x09 },	// wbinvd
@@ -50,19 +45,11 @@ static const struct patch_xxl patch_data_xxl = {
 					  0x48, 0x0f, 0x07 },	// swapgs; sysretq
 	.cpu_swapgs	= { 0x0f, 0x01, 0xf8 },	// swapgs
 	.mov64		= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
-# else
-	.mmu_write_cr3	= { 0x0f, 0x22, 0xd8 },	// mov %eax, %cr3
-	.irq_restore_fl	= { 0x50, 0x9d },	// push %eax; popf
-	.cpu_iret	= { 0xcf },		// iret
-# endif
 };
 
 unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
 {
-#ifdef CONFIG_X86_64
 	return PATCH(xxl, mov64, insn_buff, len);
-#endif
-	return 0;
 }
 # endif /* CONFIG_PARAVIRT_XXL */
@@ -98,13 +85,9 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
 	PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
 	PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
 
-# ifdef CONFIG_X86_64
 	PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
 	PATCH_CASE(cpu, swapgs, xxl, insn_buff, len);
 	PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
-# else
-	PATCH_CASE(cpu, iret, xxl, insn_buff, len);
-# endif
 #endif
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
...
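This file is where the indirect pv-op calls get rewritten at boot: when the native implementation is short enough, its raw instruction bytes (the arrays above) are copied directly over the call site. With 32-bit gone, the 32-bit byte sequences (mov %eax,%cr3; push %eax; popf; iret) and their #ifdefs disappear. A hedged userspace sketch of the byte-patching idea, reusing the swapgs encoding from the table above (function names are illustrative, simplified from native_patch()):

	#include <string.h>

	/* swapgs: 0x0f 0x01 0xf8, as in patch_data_xxl above */
	static const unsigned char cpu_swapgs[3] = { 0x0f, 0x01, 0xf8 };

	/* Copy the native bytes over the call site if they fit; returning 0
	 * keeps the indirect call through pv_ops instead. */
	static unsigned int patch_swapgs(void *insn_buff, unsigned int len)
	{
		if (len < sizeof(cpu_swapgs))
			return 0;
		memcpy(insn_buff, cpu_swapgs, sizeof(cpu_swapgs));
		return sizeof(cpu_swapgs);	/* bytes emitted */
	}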
@@ -1014,8 +1014,6 @@ void __init xen_setup_vcpu_info_placement(void)
 }
 
 static const struct pv_info xen_info __initconst = {
-	.shared_kernel_pmd = 0,
-
 	.extra_user_64bit_cs = FLAT_USER_CS64,
 	.name = "Xen",
 };
@@ -1314,10 +1312,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
 		      xen_start_info->nr_pages);
 	xen_reserve_special_pages();
 
-	/* keep using Xen gdt for now; no urgent need to change it */
-
-	pv_info.kernel_rpl = 0;
-
 	/*
 	 * We used to do this in xen_arch_setup, but that is too late
 	 * on AMD were early_cpu_init (run before ->arch_setup()) calls
...
@@ -285,13 +285,6 @@ static void xen_set_pte(pte_t *ptep, pte_t pteval)
 	__xen_set_pte(ptep, pteval);
 }
 
-static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-			   pte_t *ptep, pte_t pteval)
-{
-	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
-	__xen_set_pte(ptep, pteval);
-}
-
 pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
 				 unsigned long addr, pte_t *ptep)
 {
@@ -2105,7 +2098,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.release_pmd = xen_release_pmd_init,
 
 	.set_pte = xen_set_pte_init,
-	.set_pte_at = xen_set_pte_at,
 	.set_pmd = xen_set_pmd_hyper,
 
 	.ptep_modify_prot_start = __ptep_modify_prot_start,
...
@@ -153,26 +153,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
 
 DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
 
-TRACE_EVENT(xen_mmu_set_pte_at,
-	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
-		     pte_t *ptep, pte_t pteval),
-	    TP_ARGS(mm, addr, ptep, pteval),
-	    TP_STRUCT__entry(
-		    __field(struct mm_struct *, mm)
-		    __field(unsigned long, addr)
-		    __field(pte_t *, ptep)
-		    __field(pteval_t, pteval)
-		    ),
-	    TP_fast_assign(__entry->mm = mm;
-			   __entry->addr = addr;
-			   __entry->ptep = ptep;
-			   __entry->pteval = pteval.pte),
-	    TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
-		      __entry->mm, __entry->addr, __entry->ptep,
-		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
-		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
-	);
-
 TRACE_DEFINE_SIZEOF(pmdval_t);
 
 TRACE_EVENT(xen_mmu_set_pmd,
...