Commit 12f2bbd6 authored by Linus Torvalds

Merge branch 'x86-asmlinkage-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asmlinkage (LTO) changes from Peter Anvin:
 "This patchset adds more infrastructure for link time optimization
  (LTO).

  This patchset was pulled into my tree late because of a
  miscommunication (part of the patchset was picked up by other
  maintainers).  However, the patchset is strictly build-related and
  seems to be okay in testing"

* 'x86-asmlinkage-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, asmlinkage, xen: Fix type of NMI
  x86, asmlinkage, xen, kvm: Make {xen,kvm}_lock_spinning global and visible
  x86: Use inline assembler instead of global register variable to get sp
  x86, asmlinkage, paravirt: Make paravirt thunks global
  x86, asmlinkage, paravirt: Don't rely on local assembler labels
  x86, asmlinkage, lguest: Fix C functions used by inline assembler
parents 10ffe3db 07ba06d9
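
The common thread in these patches: a C function whose only callers are inline-assembly thunks (or assembly files) looks unused to the compiler, so link-time optimization may drop its body, give the symbol a partition-local name, or separate definition and reference across LTO partitions. The fixes mark such functions __visible or asmlinkage and make asm-defined labels .globl. A minimal user-space sketch of the failure mode and the fix, with illustrative names that are not from the patches:

    /* helper_func's only caller is the asm thunk below.  Without
     * externally_visible, building with -flto can discard the body or
     * localize the symbol, and the assembler reference fails to link. */
    #define __visible __attribute__((externally_visible))

    __visible unsigned long helper_func(void)
    {
            return 42;              /* stand-in for real work */
    }

    /* Hand-written thunk: the compiler never sees this reference to
     * helper_func, only the assembler does. */
    asm(".pushsection .text\n"
        ".globl helper_thunk\n"
        "helper_thunk:\n"
        "\tjmp helper_func\n"       /* tail call; stack stays ABI-aligned */
        ".popsection");

    extern unsigned long helper_thunk(void);

    int main(void)
    {
            return helper_thunk() == 42 ? 0 : 1;
    }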
@@ -781,9 +781,9 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
  */
 #define PV_CALLEE_SAVE_REGS_THUNK(func)					\
 	extern typeof(func) __raw_callee_save_##func;			\
-	static void *__##func##__ __used = func;			\
 									\
 	asm(".pushsection .text;"					\
+	    ".globl __raw_callee_save_" #func " ; "			\
 	    "__raw_callee_save_" #func ": "				\
 	    PV_SAVE_ALL_CALLER_REGS					\
 	    "call " #func ";"						\
...
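
The static __used pointer that previously kept func alive is gone; the called functions are now kept via __visible/asmlinkage instead, and the thunk's entry label is exported with .globl rather than relying on an assembler-local label that LTO partitioning can separate from its users. Roughly, the fixed macro now expands like this for a function f (the macro's tail is elided in the hunk above; it restores the saved registers, returns, and pops the section):

    extern typeof(f) __raw_callee_save_f;

    asm(".pushsection .text;"
        ".globl __raw_callee_save_f ; "     /* new: label is now global */
        "__raw_callee_save_f: "
        /* PV_SAVE_ALL_CALLER_REGS: push every caller-saved register */
        "call f;"
        /* PV_RESTORE_ALL_CALLER_REGS, "ret;", ".popsection" follow */
        );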
@@ -388,10 +388,11 @@ extern struct pv_lock_ops pv_lock_ops;
 	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
 
 /* Simple instruction patching code. */
-#define DEF_NATIVE(ops, name, code)					\
-	extern const char start_##ops##_##name[] __visible,		\
-			  end_##ops##_##name[] __visible;		\
-	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
+#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
+
+#define DEF_NATIVE(ops, name, code)					\
+	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
+	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
 
 unsigned paravirt_patch_nop(void);
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
...
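
NATIVE_LABEL emits each start/end marker as a global assembler label instead of a plain (section-local) one, and __visible moves onto the extern declarations. As a sketch, one existing user of the macro in the kernel's patching code, DEF_NATIVE(pv_irq_ops, irq_disable, "cli"), now expands to roughly:

    __visible extern const char start_pv_irq_ops_irq_disable[],
                                end_pv_irq_ops_irq_disable[];

    asm("\n\t.globl start_pv_irq_ops_irq_disable"
        "\nstart_pv_irq_ops_irq_disable:\n\t"
        "cli"
        "\n\t.globl end_pv_irq_ops_irq_disable"
        "\nend_pv_irq_ops_irq_disable:\n\t");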
@@ -163,9 +163,11 @@ struct thread_info {
  */
 #ifndef __ASSEMBLY__
 
-
-/* how to get the current stack pointer from C */
-register unsigned long current_stack_pointer asm("esp") __used;
+#define current_stack_pointer ({		\
+	unsigned long sp;			\
+	asm("mov %%esp,%0" : "=g" (sp));	\
+	sp;					\
+})
 
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
...
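
A global register variable (the old form) is a GCC extension that reserves the register for the whole translation unit and is not supported under LTO. The replacement is an ordinary statement expression that reads %esp with one inline-asm instruction at each use site. Its typical consumer is current_thread_info(), whose body (elided by the hunk above; quoted here from the kernel source of this era) masks the stack pointer down to the base of the THREAD_SIZE-aligned stack:

    static inline struct thread_info *current_thread_info(void)
    {
            return (struct thread_info *)
                    (current_stack_pointer & ~(THREAD_SIZE - 1));
    }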
@@ -673,7 +673,7 @@ static cpumask_t waiting_cpus;
 /* Track spinlock on which a cpu is waiting */
 static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
 
-static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	struct kvm_lock_waiting *w;
 	int cpu;
...
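
kvm_lock_spinning is only ever reached through the asm "call" inside its PV_CALLEE_SAVE_REGS_THUNK wrapper, which is why it needs __visible rather than static. For context, the registration (from kvm_spinlock_init() elsewhere in the same file) wires the thunked version into the paravirt ops:

    pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);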
@@ -33,7 +33,7 @@
  * and vice versa.
  */
 
-static unsigned long vsmp_save_fl(void)
+asmlinkage unsigned long vsmp_save_fl(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -43,7 +43,7 @@ static unsigned long vsmp_save_fl(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
 
-static void vsmp_restore_fl(unsigned long flags)
+__visible void vsmp_restore_fl(unsigned long flags)
 {
 	if (flags & X86_EFLAGS_IF)
 		flags &= ~X86_EFLAGS_AC;
@@ -53,7 +53,7 @@ static void vsmp_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
-static void vsmp_irq_disable(void)
+asmlinkage void vsmp_irq_disable(void)
 {
 	unsigned long flags = native_save_fl();
 
@@ -61,7 +61,7 @@ static void vsmp_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
-static void vsmp_irq_enable(void)
+asmlinkage void vsmp_irq_enable(void)
 {
 	unsigned long flags = native_save_fl();
...
@@ -233,13 +233,13 @@ static void lguest_end_context_switch(struct task_struct *next)
  * flags word contains all kind of stuff, but in practice Linux only cares
  * about the interrupt flag.  Our "save_flags()" just returns that.
  */
-static unsigned long save_fl(void)
+asmlinkage unsigned long lguest_save_fl(void)
 {
 	return lguest_data.irq_enabled;
 }
 
 /* Interrupts go off... */
-static void irq_disable(void)
+asmlinkage void lguest_irq_disable(void)
 {
 	lguest_data.irq_enabled = 0;
 }
@@ -253,8 +253,8 @@ static void irq_disable(void)
  * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
  * C function, then restores it.
  */
-PV_CALLEE_SAVE_REGS_THUNK(save_fl);
-PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
+PV_CALLEE_SAVE_REGS_THUNK(lguest_save_fl);
+PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable);
 /*:*/
 
 /* These are in i386_head.S */
@@ -1291,9 +1291,9 @@ __init void lguest_init(void)
 	 */
 
 	/* Interrupt-related operations */
-	pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
+	pv_irq_ops.save_fl = PV_CALLEE_SAVE(lguest_save_fl);
 	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
-	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
+	pv_irq_ops.irq_disable = PV_CALLEE_SAVE(lguest_irq_disable);
 	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
 	pv_irq_ops.safe_halt = lguest_safe_halt;
...
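
Dropping static means these helpers now live in the kernel's global namespace, so the overly generic names gain an lguest_ prefix to avoid clashing with identically named helpers elsewhere, e.g.:

    /* Before: file-local, so a short name was safe. */
    static unsigned long save_fl(void);

    /* After: global, so the thunk's asm reference resolves under LTO;
     * the prefix keeps the global namespace unambiguous. */
    asmlinkage unsigned long lguest_save_fl(void);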
@@ -23,7 +23,7 @@ void xen_force_evtchn_callback(void)
 	(void)HYPERVISOR_xen_version(0, NULL);
 }
 
-static unsigned long xen_save_fl(void)
+asmlinkage unsigned long xen_save_fl(void)
 {
 	struct vcpu_info *vcpu;
 	unsigned long flags;
@@ -41,7 +41,7 @@ static unsigned long xen_save_fl(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 
-static void xen_restore_fl(unsigned long flags)
+__visible void xen_restore_fl(unsigned long flags)
 {
 	struct vcpu_info *vcpu;
@@ -63,7 +63,7 @@ static void xen_restore_fl(unsigned long flags)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
-static void xen_irq_disable(void)
+asmlinkage void xen_irq_disable(void)
 {
 	/* There's a one instruction preempt window here.  We need to
 	   make sure we're don't switch CPUs between getting the vcpu
@@ -74,7 +74,7 @@ static void xen_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-static void xen_irq_enable(void)
+asmlinkage void xen_irq_enable(void)
 {
 	struct vcpu_info *vcpu;
...
@@ -431,7 +431,7 @@ static pteval_t iomap_pte(pteval_t val)
 	return val;
 }
 
-static pteval_t xen_pte_val(pte_t pte)
+__visible pteval_t xen_pte_val(pte_t pte)
 {
 	pteval_t pteval = pte.pte;
 #if 0
@@ -448,7 +448,7 @@ static pteval_t xen_pte_val(pte_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
-static pgdval_t xen_pgd_val(pgd_t pgd)
+__visible pgdval_t xen_pgd_val(pgd_t pgd)
 {
 	return pte_mfn_to_pfn(pgd.pgd);
 }
@@ -479,7 +479,7 @@ void xen_set_pat(u64 pat)
 	WARN_ON(pat != 0x0007010600070106ull);
 }
 
-static pte_t xen_make_pte(pteval_t pte)
+__visible pte_t xen_make_pte(pteval_t pte)
 {
 	phys_addr_t addr = (pte & PTE_PFN_MASK);
 #if 0
@@ -514,14 +514,14 @@ static pte_t xen_make_pte(pteval_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
-static pgd_t xen_make_pgd(pgdval_t pgd)
+__visible pgd_t xen_make_pgd(pgdval_t pgd)
 {
 	pgd = pte_pfn_to_mfn(pgd);
 	return native_make_pgd(pgd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
-static pmdval_t xen_pmd_val(pmd_t pmd)
+__visible pmdval_t xen_pmd_val(pmd_t pmd)
 {
 	return pte_mfn_to_pfn(pmd.pmd);
 }
@@ -580,7 +580,7 @@ static void xen_pmd_clear(pmd_t *pmdp)
 }
 #endif	/* CONFIG_X86_PAE */
 
-static pmd_t xen_make_pmd(pmdval_t pmd)
+__visible pmd_t xen_make_pmd(pmdval_t pmd)
 {
 	pmd = pte_pfn_to_mfn(pmd);
 	return native_make_pmd(pmd);
@@ -588,13 +588,13 @@ static pmd_t xen_make_pmd(pmdval_t pmd)
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
-static pudval_t xen_pud_val(pud_t pud)
+__visible pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
-static pud_t xen_make_pud(pudval_t pud)
+__visible pud_t xen_make_pud(pudval_t pud)
 {
 	pud = pte_pfn_to_mfn(pud);
...
@@ -35,7 +35,7 @@
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 #ifdef CONFIG_X86_64
-extern const char nmi[];
+extern asmlinkage void nmi(void);
 #endif
 extern void xen_sysenter_target(void);
 extern void xen_syscall_target(void);
@@ -577,7 +577,7 @@ void xen_enable_syscall(void)
 void xen_enable_nmi(void)
 {
 #ifdef CONFIG_X86_64
-	if (register_callback(CALLBACKTYPE_nmi, nmi))
+	if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
 		BUG();
 #endif
 }
...
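
Declaring the assembly entry point with its real function type (instead of const char[]) matters because LTO compares symbol types across compilation units; the one call site that wants a byte pointer now casts explicitly. The same pattern in isolation (register_callback and CALLBACKTYPE_nmi are the Xen interfaces used above; the wrapper name is illustrative):

    extern asmlinkage void nmi(void);   /* defined in assembly */

    void example_enable_nmi(void)
    {
            if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
                    BUG();
    }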
@@ -106,7 +106,7 @@ static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
 static bool xen_pvspin = true;
-static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	int irq = __this_cpu_read(lock_kicker_irq);
 	struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
...