Commit 95f66b37 authored by Ingo Molnar

Merge branch 'x86/asm' into x86/mm

parents 46cb27f5 9f331119
@@ -128,7 +128,7 @@
 #ifndef __ASSEMBLY__
 static inline int invalid_vm86_irq(int irq)
 {
-	return irq < 3 || irq > 15;
+	return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
 }
 #endif
...
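The named limits replace the bare 3 and 15. In this era's arch/x86/include/asm/irq_vectors.h they are defined as below (a sketch matching the literals the diff removes; IRQs 0, 1 and 2, the timer, keyboard and cascade lines, are never handed to a vm86 task):

    #define FIRST_VM86_IRQ	 3
    #define LAST_VM86_IRQ	15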
@@ -192,14 +192,26 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
 					   unsigned size)
 {
 	might_sleep();
-	return __copy_user_nocache(dst, src, size, 1);
+	/*
+	 * In practice this limit means that large file write()s
+	 * which get chunked to 4K copies get handled via
+	 * non-temporal stores here. Smaller writes get handled
+	 * via regular __copy_from_user():
+	 */
+	if (likely(size >= PAGE_SIZE))
+		return __copy_user_nocache(dst, src, size, 1);
+	else
+		return __copy_from_user(dst, src, size);
 }
 
 static inline int __copy_from_user_inatomic_nocache(void *dst,
 						    const void __user *src,
 						    unsigned size)
 {
-	return __copy_user_nocache(dst, src, size, 0);
+	if (likely(size >= PAGE_SIZE))
+		return __copy_user_nocache(dst, src, size, 0);
+	else
+		return __copy_from_user_inatomic(dst, src, size);
 }
 
 unsigned long
...
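On x86-64 PAGE_SIZE is 4096, so the new threshold lines up with the 4K chunking the comment describes: a page-sized streaming copy bypasses the cache with non-temporal stores, while anything smaller stays on the ordinary cached path. The fourth argument of __copy_user_nocache() selects the fault policy; a sketch of the declaration from the same header (arch/x86/include/asm/uaccess_64.h), with the semantics spelled out in my own words:

    /*
     * Non-temporal copy from user space. If the source faults part-way
     * through, 'zerorest' chooses whether the unwritten tail of dst is
     * zero-filled (1, used by the sleeping variant above) or left
     * untouched (0, used by the in-atomic variant).
     */
    extern int __copy_user_nocache(void *dst, const void __user *src,
    			       unsigned size, int zerorest);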
@@ -113,6 +113,7 @@ ENTRY(efi_call_phys)
 	movl	(%edx), %ecx
 	pushl	%ecx
 	ret
+ENDPROC(efi_call_phys)
 .previous
 
 .data
...
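The ENDPROC() annotations added throughout pair with ENTRY() to mark a symbol as a function and record its size. In include/linux/linkage.h of this era the macros expand to roughly:

    #define END(name) \
    	.size name, .-name

    #define ENDPROC(name) \
    	.type name, @function; \
    	END(name)

With .type and .size emitted, tools such as objdump and kallsyms can attribute addresses inside these assembly routines to the right symbol, which is the point of the annotation sweep.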
@@ -41,6 +41,7 @@ ENTRY(efi_call0)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call0)
 
 ENTRY(efi_call1)
 	SAVE_XMM
@@ -50,6 +51,7 @@ ENTRY(efi_call1)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call1)
 
 ENTRY(efi_call2)
 	SAVE_XMM
@@ -59,6 +61,7 @@ ENTRY(efi_call2)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call2)
 
 ENTRY(efi_call3)
 	SAVE_XMM
@@ -69,6 +72,7 @@ ENTRY(efi_call3)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call3)
 
 ENTRY(efi_call4)
 	SAVE_XMM
@@ -80,6 +84,7 @@ ENTRY(efi_call4)
 	addq $32, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call4)
 
 ENTRY(efi_call5)
 	SAVE_XMM
@@ -92,6 +97,7 @@ ENTRY(efi_call5)
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call5)
 
 ENTRY(efi_call6)
 	SAVE_XMM
@@ -107,3 +113,4 @@ ENTRY(efi_call6)
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
+ENDPROC(efi_call6)
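For context: these stubs bridge from the kernel's calling convention to the Microsoft x64 convention used by EFI runtime services, preserving the XMM registers the firmware is allowed to clobber. The C-side declarations they back live in arch/x86/include/asm/efi.h and, from memory of the same era (treat as a sketch), look like:

    extern u64 efi_call0(void *fp);
    extern u64 efi_call1(void *fp, u64 arg1);
    extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
    /* ... and so on up to ... */
    extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
    		     u64 arg4, u64 arg5, u64 arg6);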
@@ -77,20 +77,17 @@ ENTRY(ftrace_caller)
 	movq 8(%rbp), %rsi
 	subq $MCOUNT_INSN_SIZE, %rdi
 
-.globl ftrace_call
-ftrace_call:
+GLOBAL(ftrace_call)
 	call ftrace_stub
 
 	MCOUNT_RESTORE_FRAME
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
+GLOBAL(ftrace_graph_call)
 	jmp ftrace_stub
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
 	retq
 END(ftrace_caller)
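GLOBAL() replaces the open-coded .globl-plus-label pairs. It comes from arch/x86/include/asm/linkage.h and expands to roughly:

    #define GLOBAL(name)	\
    	.globl name;	\
    	name:

Unlike ENTRY(), GLOBAL() does not align the symbol, which fits these uses: ftrace_call and ftrace_graph_call are patched call sites in the middle of a function rather than function entry points.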
@@ -110,8 +107,7 @@ ENTRY(mcount)
 	jnz ftrace_graph_caller
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
 	retq
 
 trace:
@@ -148,9 +144,7 @@ ENTRY(ftrace_graph_caller)
 	retq
 END(ftrace_graph_caller)
 
-.globl return_to_handler
-return_to_handler:
+GLOBAL(return_to_handler)
 	subq  $80, %rsp
 
 	movq %rax, (%rsp)
@@ -188,6 +182,7 @@ return_to_handler:
 ENTRY(native_usergs_sysret64)
 	swapgs
 	sysretq
+ENDPROC(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
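native_usergs_sysret64 is the native backend for the paravirt USERGS_SYSRET64 return path (swap back to the user GS base, then sysretq). Under CONFIG_PARAVIRT it is reached through a function pointer, so proper type and size annotations matter; the hook, roughly as declared in that era's arch/x86/include/asm/paravirt.h:

    struct pv_cpu_ops {
    	/* ... */
    	/* Return to 64-bit user space after SYSCALL; natively
    	   this is the swapgs + sysretq stub above. */
    	void (*usergs_sysret64)(void);
    	/* ... */
    };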
@@ -633,16 +628,14 @@ tracesys:
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
  */
-	.globl int_ret_from_sys_call
-	.globl int_with_check
-int_ret_from_sys_call:
+GLOBAL(int_ret_from_sys_call)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl $3,CS-ARGOFFSET(%rsp)
 	je retint_restore_args
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi: mask to check */
-int_with_check:
+GLOBAL(int_with_check)
 	LOCKDEP_SYS_EXIT_IRQ
 	GET_THREAD_INFO(%rcx)
 	movl TI_flags(%rcx),%edx
...
@@ -329,8 +329,6 @@ early_idt_ripmsg:
 #endif /* CONFIG_EARLY_PRINTK */
 .previous
 
-.balign PAGE_SIZE
-
 #define NEXT_PAGE(name) \
 	.balign	PAGE_SIZE; \
 ENTRY(name)
@@ -419,7 +417,7 @@ ENTRY(phys_base)
 	.section .bss, "aw", @nobits
 	.align L1_CACHE_BYTES
 ENTRY(idt_table)
-	.skip 256 * 16
+	.skip IDT_ENTRIES * 16
 
 	.section .bss.page_aligned, "aw", @nobits
 	.align PAGE_SIZE
...
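IDT_ENTRIES is 256 on x86 (matching the literal the diff replaces), and each long-mode interrupt gate is 16 bytes, so the reservation is still exactly one 4K page. The gate layout, roughly as in that era's arch/x86/include/asm/desc_defs.h:

    struct gate_struct64 {
    	u16 offset_low;		/* handler address, bits 0..15 */
    	u16 segment;		/* code segment selector */
    	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
    	u16 offset_middle;	/* handler address, bits 16..31 */
    	u32 offset_high;	/* handler address, bits 32..63 */
    	u32 zero1;		/* reserved */
    } __attribute__((packed));	/* sizeof == 16, hence IDT_ENTRIES * 16 */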