Commit 9bad5658 authored by Juergen Gross, committed by Thomas Gleixner

x86/paravirt: Move the Xen-only pv_cpu_ops under the PARAVIRT_XXL umbrella

Most of the paravirt ops defined in pv_cpu_ops are for Xen PV guests
only. Define them only if CONFIG_PARAVIRT_XXL is set.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel@lists.xenproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: akataria@vmware.com
Cc: rusty@rustcorp.com.au
Cc: boris.ostrovsky@oracle.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180828074026.820-13-jgross@suse.com
parent 40181646
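
For orientation, the net effect of the diff below is a split of the paravirt hooks: ops needed by all paravirt guests stay under CONFIG_PARAVIRT, while the pv_cpu_ops hooks that only Xen PV guests use move under CONFIG_PARAVIRT_XXL. A minimal sketch of the resulting guard pattern (illustrative only, loosely modeled on the special_insns.h hunk below; not literal kernel code):

```c
/*
 * Xen PV (PARAVIRT_XXL) kernels route privileged-instruction hooks
 * through the pv_ops indirection; all other configurations get the
 * native instruction inlined.
 */
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>	/* write_cr0() -> PVOP_VCALL1(cpu.write_cr0, x) */
#else
static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);	/* compiles to a plain mov to %cr0 */
}
#endif
```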
arch/x86/include/asm/debugreg.h
@@ -8,7 +8,7 @@

 DECLARE_PER_CPU(unsigned long, cpu_dr7);

-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_XXL
 /*
  * These special macros can be used to get or set a debugging register
  */
arch/x86/include/asm/desc.h
@@ -108,7 +108,7 @@ static inline int desc_empty(const void *ptr)
 	return !(desc[0] | desc[1]);
 }

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 #include <asm/paravirt.h>
 #else
 #define load_TR_desc()			native_load_tr_desc()
@@ -134,7 +134,7 @@ static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 {
 }
-#endif	/* CONFIG_PARAVIRT */
+#endif	/* CONFIG_PARAVIRT_XXL */

 #define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
arch/x86/include/asm/irqflags.h
@@ -122,6 +122,16 @@ static inline notrace unsigned long arch_local_irq_save(void)
 #define ENABLE_INTERRUPTS(x)	sti
 #define DISABLE_INTERRUPTS(x)	cli

+#ifdef CONFIG_X86_64
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(x)		pushfq; popq %rax
+#endif
+#endif
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
+#ifndef CONFIG_PARAVIRT_XXL
+#ifdef __ASSEMBLY__
 #ifdef CONFIG_X86_64
 #define SWAPGS	swapgs
 /*
@@ -143,16 +153,12 @@ static inline notrace unsigned long arch_local_irq_save(void)
 	swapgs;					\
 	sysretl

-#ifdef CONFIG_DEBUG_ENTRY
-#define SAVE_FLAGS(x)		pushfq; popq %rax
-#endif
-
 #else
 #define INTERRUPT_RETURN		iret
 #endif

 #endif /* __ASSEMBLY__ */
-#endif /* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_XXL */

 #ifndef __ASSEMBLY__
 static inline int arch_irqs_disabled_flags(unsigned long flags)
arch/x86/include/asm/msr.h
@@ -242,7 +242,7 @@ static inline unsigned long long native_read_pmc(int counter)
 	return EAX_EDX_VAL(val, low, high);
 }

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 #include <asm/paravirt.h>
 #else
 #include <linux/errno.h>
@@ -305,7 +305,7 @@ do {
 #define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

-#endif	/* !CONFIG_PARAVIRT */
+#endif	/* !CONFIG_PARAVIRT_XXL */

 /*
  * 64-bit version of wrmsr_safe():
arch/x86/include/asm/paravirt.h
@@ -17,6 +17,7 @@
 #include <linux/cpumask.h>
 #include <asm/frame.h>

+#ifdef CONFIG_PARAVIRT_XXL
 static inline void load_sp0(unsigned long sp0)
 {
 	PVOP_VCALL1(cpu.load_sp0, sp0);
@@ -51,6 +52,7 @@ static inline void write_cr0(unsigned long x)
 {
 	PVOP_VCALL1(cpu.write_cr0, x);
 }
+#endif

 static inline unsigned long read_cr2(void)
 {
@@ -72,6 +74,7 @@ static inline void write_cr3(unsigned long x)
 	PVOP_VCALL1(mmu.write_cr3, x);
 }

+#ifdef CONFIG_PARAVIRT_XXL
 static inline void __write_cr4(unsigned long x)
 {
 	PVOP_VCALL1(cpu.write_cr4, x);
@@ -88,6 +91,7 @@ static inline void write_cr8(unsigned long x)
 	PVOP_VCALL1(cpu.write_cr8, x);
 }
 #endif
+#endif

 static inline void arch_safe_halt(void)
 {
@@ -99,14 +103,13 @@ static inline void halt(void)
 	PVOP_VCALL0(irq.halt);
 }

+#ifdef CONFIG_PARAVIRT_XXL
 static inline void wbinvd(void)
 {
 	PVOP_VCALL0(cpu.wbinvd);
 }

-#ifdef CONFIG_PARAVIRT_XXL
 #define get_kernel_rpl()	(pv_info.kernel_rpl)
-#endif

 static inline u64 paravirt_read_msr(unsigned msr)
 {
@@ -171,6 +174,7 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr_safe(msr, &err);
 	return err;
 }
+#endif

 static inline unsigned long long paravirt_sched_clock(void)
 {
@@ -186,6 +190,7 @@ static inline u64 paravirt_steal_clock(int cpu)
 	return PVOP_CALL1(u64, time.steal_clock, cpu);
 }

+#ifdef CONFIG_PARAVIRT_XXL
 static inline unsigned long long paravirt_read_pmc(int counter)
 {
 	return PVOP_CALL1(u64, cpu.read_pmc, counter);
@@ -230,6 +235,7 @@ static inline unsigned long paravirt_store_tr(void)
 {
 	return PVOP_CALL0(unsigned long, cpu.store_tr);
 }
+
 #define store_tr(tr)	((tr) = paravirt_store_tr())
 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
 {
@@ -263,6 +269,7 @@ static inline void set_iopl_mask(unsigned mask)
 {
 	PVOP_VCALL1(cpu.set_iopl_mask, mask);
 }
+#endif

 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void)
@@ -618,6 +625,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 }
 #endif	/* CONFIG_X86_PAE */

+#ifdef CONFIG_PARAVIRT_XXL
 #define __HAVE_ARCH_START_CONTEXT_SWITCH
 static inline void arch_start_context_switch(struct task_struct *prev)
 {
@@ -628,6 +636,7 @@ static inline void arch_end_context_switch(struct task_struct *next)
 {
 	PVOP_VCALL1(cpu.end_context_switch, next);
 }
+#endif

 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
@@ -870,10 +879,12 @@ extern void default_banner(void);
 #define PARA_INDIRECT(addr)	*%cs:addr
 #endif

+#ifdef CONFIG_PARAVIRT_XXL
 #define INTERRUPT_RETURN					\
	PARA_SITE(PARA_PATCH(PV_CPU_iret),			\
		  ANNOTATE_RETPOLINE_SAFE;			\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)
+#endif

 #define DISABLE_INTERRUPTS(clobbers)				\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),		\
@@ -890,6 +901,7 @@ extern void default_banner(void);
	PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

 #ifdef CONFIG_X86_64
+#ifdef CONFIG_PARAVIRT_XXL
 /*
  * If swapgs is used while the userspace stack is still current,
  * there's no way to call a pvop.  The PV replacement *must* be
@@ -909,15 +921,18 @@ extern void default_banner(void);
		  ANNOTATE_RETPOLINE_SAFE;			\
		  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);	\
		 )
+#endif

 #define GET_CR2_INTO_RAX				\
	ANNOTATE_RETPOLINE_SAFE;			\
	call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);

+#ifdef CONFIG_PARAVIRT_XXL
 #define USERGS_SYSRET64						\
	PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),		\
		  ANNOTATE_RETPOLINE_SAFE;			\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
+#endif

 #ifdef CONFIG_DEBUG_ENTRY
 #define SAVE_FLAGS(clobbers)					\
arch/x86/include/asm/paravirt_types.h
@@ -106,6 +106,9 @@ struct pv_time_ops {
 struct pv_cpu_ops {
 	/* hooks for various privileged instructions */
+	void (*io_delay)(void);
+
+#ifdef CONFIG_PARAVIRT_XXL
 	unsigned long (*get_debugreg)(int regno);
 	void (*set_debugreg)(int regno, unsigned long value);
@@ -143,7 +146,6 @@ struct pv_cpu_ops {
 	void (*set_iopl_mask)(unsigned mask);

 	void (*wbinvd)(void);
-	void (*io_delay)(void);

 	/* cpuid emulation, mostly so that caps bits can be disabled */
 	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
@@ -178,6 +180,7 @@ struct pv_cpu_ops {
 	void (*start_context_switch)(struct task_struct *prev);
 	void (*end_context_switch)(struct task_struct *next);
+#endif
 } __no_randomize_layout;

 struct pv_irq_ops {
arch/x86/include/asm/pgtable.h
@@ -111,10 +111,12 @@ extern pmdval_t early_pmd_flags;
 #define pte_val(x)	native_pte_val(x)
 #define __pte(x)	native_make_pte(x)

-#define arch_end_context_switch(prev)	do {} while(0)
-
 #endif	/* CONFIG_PARAVIRT */

+#ifndef CONFIG_PARAVIRT_XXL
+#define arch_end_context_switch(prev)	do {} while(0)
+#endif	/* CONFIG_PARAVIRT_XXL */
+
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
arch/x86/include/asm/processor.h
@@ -578,7 +578,7 @@ static inline bool on_thread_stack(void)
 		current_stack_pointer) < THREAD_SIZE;
 }

-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 #include <asm/paravirt.h>
 #else
 #define __cpuid			native_cpuid
@@ -589,7 +589,7 @@ static inline void load_sp0(unsigned long sp0)
 }

 #define set_iopl_mask native_set_iopl_mask
-#endif /* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_XXL */

 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
arch/x86/include/asm/special_insns.h
@@ -143,8 +143,9 @@ static inline unsigned long __read_cr4(void)

 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
-#else
+#endif

+#ifndef CONFIG_PARAVIRT_XXL
 static inline unsigned long read_cr0(void)
 {
 	return native_read_cr0();
@@ -154,7 +155,9 @@ static inline void write_cr0(unsigned long x)
 {
 	native_write_cr0(x);
 }
+#endif

+#ifndef CONFIG_PARAVIRT
 static inline unsigned long read_cr2(void)
 {
 	return native_read_cr2();
@@ -178,7 +181,9 @@ static inline void write_cr3(unsigned long x)
 {
 	native_write_cr3(x);
 }
+#endif

+#ifndef CONFIG_PARAVIRT_XXL
 static inline void __write_cr4(unsigned long x)
 {
 	native_write_cr4(x);
@@ -208,7 +213,7 @@ static inline void load_gs_index(unsigned selector)
 #endif

-#endif/* CONFIG_PARAVIRT */
+#endif/* CONFIG_PARAVIRT_XXL */

 static inline void clflush(volatile void *__p)
 {
arch/x86/kernel/asm-offsets.c
@@ -68,7 +68,9 @@ void common(void) {
 	BLANK();
 	OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable);
 	OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable);
+#ifdef CONFIG_PARAVIRT_XXL
 	OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret);
+#endif
 	OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2);
 #endif
arch/x86/kernel/asm-offsets_64.c
@@ -21,9 +21,11 @@ static char syscalls_ia32[] = {
 int main(void)
 {
 #ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 	OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template,
 	       cpu.usergs_sysret64);
 	OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs);
+#endif
 #ifdef CONFIG_DEBUG_ENTRY
 	OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl);
 #endif
arch/x86/kernel/cpu/common.c
@@ -1240,7 +1240,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
 	 * ESPFIX issue, we can change this.
 	 */
 #ifdef CONFIG_X86_32
-# ifdef CONFIG_PARAVIRT
+# ifdef CONFIG_PARAVIRT_XXL
 	do {
 		extern void native_iret(void);
 		if (pv_ops.cpu.iret == native_iret)
arch/x86/kernel/head_64.S
@@ -31,6 +31,8 @@
 #define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
 #else
 #define GET_CR2_INTO(reg) movq %cr2, reg
+#endif
+#ifndef CONFIG_PARAVIRT_XXL
 #define INTERRUPT_RETURN iretq
 #endif
arch/x86/kernel/paravirt.c
@@ -101,6 +101,7 @@ static unsigned paravirt_patch_call(void *insnbuf, const void *target,
 	return 5;
 }

+#ifdef CONFIG_PARAVIRT_XXL
 static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
				   unsigned long addr, unsigned len)
 {
@@ -119,6 +120,7 @@ static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,

 	return 5;
 }
+#endif

 DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
@@ -150,10 +152,12 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf,
 	else if (opfunc == _paravirt_ident_64)
 		ret = paravirt_patch_ident_64(insnbuf, len);
+#ifdef CONFIG_PARAVIRT_XXL
 	else if (type == PARAVIRT_PATCH(cpu.iret) ||
		 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
 		/* If operation requires a jmp, then jmp */
 		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
+#endif
 	else
 		/* Otherwise call the function. */
 		ret = paravirt_patch_call(insnbuf, opfunc, addr, len);
@@ -262,6 +266,7 @@ void paravirt_flush_lazy_mmu(void)
 	preempt_enable();
 }

+#ifdef CONFIG_PARAVIRT_XXL
 void paravirt_start_context_switch(struct task_struct *prev)
 {
 	BUG_ON(preemptible());
@@ -282,6 +287,7 @@ void paravirt_end_context_switch(struct task_struct *next)
 	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
 		arch_enter_lazy_mmu_mode();
 }
+#endif

 enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 {
@@ -320,6 +326,9 @@ struct paravirt_patch_template pv_ops = {
 	.time.steal_clock	= native_steal_clock,

 	/* Cpu ops. */
+	.cpu.io_delay		= native_io_delay,
+
+#ifdef CONFIG_PARAVIRT_XXL
 	.cpu.cpuid		= native_cpuid,
 	.cpu.get_debugreg	= native_get_debugreg,
 	.cpu.set_debugreg	= native_set_debugreg,
@@ -361,10 +370,10 @@ struct paravirt_patch_template pv_ops = {
 	.cpu.swapgs		= native_swapgs,
 	.cpu.set_iopl_mask	= native_set_iopl_mask,

-	.cpu.io_delay		= native_io_delay,
-
 	.cpu.start_context_switch	= paravirt_nop,
 	.cpu.end_context_switch		= paravirt_nop,
+#endif /* CONFIG_PARAVIRT_XXL */

 	/* Irq ops. */
 	.irq.save_fl		= __PV_IS_CALLEE_SAVE(native_save_fl),
@@ -464,10 +473,12 @@ struct paravirt_patch_template pv_ops = {
 #endif
 };

+#ifdef CONFIG_PARAVIRT_XXL
 /* At this point, native_get/set_debugreg has real function entries */
 NOKPROBE_SYMBOL(native_get_debugreg);
 NOKPROBE_SYMBOL(native_set_debugreg);
 NOKPROBE_SYMBOL(native_load_idt);
+#endif

 EXPORT_SYMBOL_GPL(pv_ops);
 EXPORT_SYMBOL_GPL(pv_info);
arch/x86/kernel/paravirt_patch_32.c
@@ -5,7 +5,9 @@ DEF_NATIVE(irq, irq_disable, "cli");
 DEF_NATIVE(irq, irq_enable, "sti");
 DEF_NATIVE(irq, restore_fl, "push %eax; popf");
 DEF_NATIVE(irq, save_fl, "pushf; pop %eax");
+#ifdef CONFIG_PARAVIRT_XXL
 DEF_NATIVE(cpu, iret, "iret");
+#endif
 DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
@@ -45,7 +47,9 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
		PATCH_SITE(irq, irq_enable);
		PATCH_SITE(irq, restore_fl);
		PATCH_SITE(irq, save_fl);
+#ifdef CONFIG_PARAVIRT_XXL
		PATCH_SITE(cpu, iret);
+#endif
		PATCH_SITE(mmu, read_cr2);
		PATCH_SITE(mmu, read_cr3);
		PATCH_SITE(mmu, write_cr3);
arch/x86/kernel/paravirt_patch_64.c
@@ -10,10 +10,12 @@ DEF_NATIVE(irq, save_fl, "pushfq; popq %rax");
 DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
+#ifdef CONFIG_PARAVIRT_XXL
 DEF_NATIVE(cpu, wbinvd, "wbinvd");
 DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(cpu, swapgs, "swapgs");
+#endif

 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
@@ -53,12 +55,14 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
		PATCH_SITE(irq, save_fl);
		PATCH_SITE(irq, irq_enable);
		PATCH_SITE(irq, irq_disable);
+#ifdef CONFIG_PARAVIRT_XXL
		PATCH_SITE(cpu, usergs_sysret64);
		PATCH_SITE(cpu, swapgs);
+		PATCH_SITE(cpu, wbinvd);
+#endif
		PATCH_SITE(mmu, read_cr2);
		PATCH_SITE(mmu, read_cr3);
		PATCH_SITE(mmu, write_cr3);
-		PATCH_SITE(cpu, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
		case PARAVIRT_PATCH(lock.queued_spin_unlock):
			if (pv_is_native_spin_unlock()) {
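
For context on the paravirt_patch_{32,64}.c hunks above: DEF_NATIVE captures the raw instruction bytes of an op's native implementation, and PATCH_SITE wires a case into native_patch() that copies those bytes over the indirect pv_ops call site at boot. Guarding both with CONFIG_PARAVIRT_XXL ensures only ops that remain compiled in can be patched. A condensed sketch of the mechanism (paraphrased from this era's kernel, not verbatim):

```c
/* DEF_NATIVE(ops, name, code) emits the native instruction bytes,
 * bracketed by start_<ops>_<name>/end_<ops>_<name> labels: */
DEF_NATIVE(cpu, wbinvd, "wbinvd");

/* PATCH_SITE(ops, x) expands to a switch case in native_patch() that
 * overwrites the patchable call site with those bytes when they fit: */
#define PATCH_SITE(ops, x)						\
	case PARAVIRT_PATCH(ops.x):					\
		return paravirt_patch_insns(ibuf, len,			\
					    start_##ops##_##x,		\
					    end_##ops##_##x)
```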