Commit 60f898ee authored by Linus Torvalds

Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 asm changes from Ingo Molnar:
 "There were lots of changes in this development cycle:

   - over 100 separate cleanups, restructuring changes, speedups and
     fixes in the x86 system call, irq, trap and other entry code, part
      of a heroic effort to deobfuscate decade-old spaghetti asm code
     and its C code dependencies (Denys Vlasenko, Andy Lutomirski)

   - alternatives code fixes and enhancements (Borislav Petkov)

   - simplifications and cleanups to the compat code (Brian Gerst)

   - signal handling fixes and new x86 testcases (Andy Lutomirski)

   - various other fixes and cleanups

  By their nature many of these changes are risky - we tried to test
  them well on many different x86 systems (there are no known
  regressions), and they are split up finely to help bisection - but
  there's still a fair bit of residual risk left so caveat emptor"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (148 commits)
  perf/x86/64: Report regs_user->ax too in get_regs_user()
  perf/x86/64: Simplify regs_user->abi setting code in get_regs_user()
  perf/x86/64: Do report user_regs->cx while we are in syscall, in get_regs_user()
  perf/x86/64: Do not guess user_regs->cs, ss, sp in get_regs_user()
  x86/asm/entry/32: Tidy up JNZ instructions after TESTs
  x86/asm/entry/64: Reduce padding in execve stubs
  x86/asm/entry/64: Remove GET_THREAD_INFO() in ret_from_fork
  x86/asm/entry/64: Simplify jumps in ret_from_fork
  x86/asm/entry/64: Remove a redundant jump
  x86/asm/entry/64: Optimize [v]fork/clone stubs
  x86/asm/entry: Zero EXTRA_REGS for stub32_execve() too
  x86/asm/entry/64: Move stub_x32_execvecloser() to stub_execveat()
  x86/asm/entry/64: Use common code for rt_sigreturn() epilogue
  x86/asm/entry/64: Add forgotten CFI annotation
  x86/asm/entry/irq: Simplify interrupt dispatch table (IDT) layout
  x86/asm/entry/64: Move opportunistic sysret code to syscall code path
  x86, selftests: Add sigreturn selftest
  x86/alternatives: Guard NOPs optimization
  x86/asm/entry: Clear EXTRA_REGS for all executable formats
  x86/signal: Remove pax argument from restore_sigcontext
  ...
parents 977e1ba5 3b75232d
...@@ -406,6 +406,12 @@ Protocol: 2.00+ ...@@ -406,6 +406,12 @@ Protocol: 2.00+
- If 0, the protected-mode code is loaded at 0x10000. - If 0, the protected-mode code is loaded at 0x10000.
- If 1, the protected-mode code is loaded at 0x100000. - If 1, the protected-mode code is loaded at 0x100000.
Bit 1 (kernel internal): KASLR_FLAG
- Used internally by the compressed kernel to communicate
KASLR status to kernel proper.
If 1, KASLR enabled.
If 0, KASLR disabled.
Bit 5 (write): QUIET_FLAG Bit 5 (write): QUIET_FLAG
- If 0, print early messages. - If 0, print early messages.
- If 1, suppress early messages. - If 1, suppress early messages.
......
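The KASLR_FLAG bit documented above lives in boot_params.hdr.loadflags next to the flags already described in this file; KEEP_SEGMENTS is the same bit the head_*.S hunks below stop spelling as $(1<<6). As a quick standalone illustration -- the numeric bit positions follow my reading of arch/x86/include/uapi/asm/bootparam.h at the time of this merge and are not part of the hunk itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed bit positions, mirroring bootparam.h loadflags. */
    #define LOADED_HIGH    (1 << 0)   /* Bit 0: protected-mode code loaded at 0x100000 */
    #define KASLR_FLAG     (1 << 1)   /* Bit 1: KASLR status handed to kernel proper */
    #define QUIET_FLAG     (1 << 5)   /* Bit 5: suppress early messages */
    #define KEEP_SEGMENTS  (1 << 6)   /* Bit 6: do not reload segment registers */

    int main(void)
    {
            uint8_t loadflags = LOADED_HIGH | KASLR_FLAG;   /* pretend header state */

            printf("loaded high: %d\n", !!(loadflags & LOADED_HIGH));
            printf("kaslr:       %d\n", !!(loadflags & KASLR_FLAG));
            printf("quiet:       %d\n", !!(loadflags & QUIET_FLAG));
            return 0;
    }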
...@@ -295,7 +295,8 @@ static unsigned long find_random_addr(unsigned long minimum, ...@@ -295,7 +295,8 @@ static unsigned long find_random_addr(unsigned long minimum,
return slots_fetch_random(); return slots_fetch_random();
} }
unsigned char *choose_kernel_location(unsigned char *input, unsigned char *choose_kernel_location(struct boot_params *boot_params,
unsigned char *input,
unsigned long input_size, unsigned long input_size,
unsigned char *output, unsigned char *output,
unsigned long output_size) unsigned long output_size)
...@@ -315,6 +316,8 @@ unsigned char *choose_kernel_location(unsigned char *input, ...@@ -315,6 +316,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
} }
#endif #endif
boot_params->hdr.loadflags |= KASLR_FLAG;
/* Record the various known unsafe memory ranges. */ /* Record the various known unsafe memory ranges. */
mem_avoid_init((unsigned long)input, input_size, mem_avoid_init((unsigned long)input, input_size,
(unsigned long)output, output_size); (unsigned long)output, output_size);
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <asm/page_types.h> #include <asm/page_types.h>
#include <asm/boot.h> #include <asm/boot.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/bootparam.h>
__HEAD __HEAD
ENTRY(startup_32) ENTRY(startup_32)
...@@ -102,7 +103,7 @@ preferred_addr: ...@@ -102,7 +103,7 @@ preferred_addr:
* Test KEEP_SEGMENTS flag to see if the bootloader is asking * Test KEEP_SEGMENTS flag to see if the bootloader is asking
* us to not reload segments * us to not reload segments
*/ */
testb $(1<<6), BP_loadflags(%esi) testb $KEEP_SEGMENTS, BP_loadflags(%esi)
jnz 1f jnz 1f
cli cli
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/bootparam.h>
__HEAD __HEAD
.code32 .code32
...@@ -46,7 +47,7 @@ ENTRY(startup_32) ...@@ -46,7 +47,7 @@ ENTRY(startup_32)
* Test KEEP_SEGMENTS flag to see if the bootloader is asking * Test KEEP_SEGMENTS flag to see if the bootloader is asking
* us to not reload segments * us to not reload segments
*/ */
testb $(1<<6), BP_loadflags(%esi) testb $KEEP_SEGMENTS, BP_loadflags(%esi)
jnz 1f jnz 1f
cli cli
...@@ -164,7 +165,7 @@ ENTRY(startup_32) ...@@ -164,7 +165,7 @@ ENTRY(startup_32)
/* After gdt is loaded */ /* After gdt is loaded */
xorl %eax, %eax xorl %eax, %eax
lldt %ax lldt %ax
movl $0x20, %eax movl $__BOOT_TSS, %eax
ltr %ax ltr %ax
/* /*
......
...@@ -377,6 +377,9 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, ...@@ -377,6 +377,9 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
real_mode = rmode; real_mode = rmode;
/* Clear it for solely in-kernel use */
real_mode->hdr.loadflags &= ~KASLR_FLAG;
sanitize_boot_params(real_mode); sanitize_boot_params(real_mode);
if (real_mode->screen_info.orig_video_mode == 7) { if (real_mode->screen_info.orig_video_mode == 7) {
...@@ -401,7 +404,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, ...@@ -401,7 +404,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
* the entire decompressed kernel plus relocation table, or the * the entire decompressed kernel plus relocation table, or the
* entire decompressed kernel plus .bss and .brk sections. * entire decompressed kernel plus .bss and .brk sections.
*/ */
output = choose_kernel_location(input_data, input_len, output, output = choose_kernel_location(real_mode, input_data, input_len, output,
output_len > run_size ? output_len output_len > run_size ? output_len
: run_size); : run_size);
......
...@@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option); ...@@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option);
#if CONFIG_RANDOMIZE_BASE #if CONFIG_RANDOMIZE_BASE
/* aslr.c */ /* aslr.c */
unsigned char *choose_kernel_location(unsigned char *input, unsigned char *choose_kernel_location(struct boot_params *boot_params,
unsigned char *input,
unsigned long input_size, unsigned long input_size,
unsigned char *output, unsigned char *output,
unsigned long output_size); unsigned long output_size);
...@@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input, ...@@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
bool has_cpuflag(int flag); bool has_cpuflag(int flag);
#else #else
static inline static inline
unsigned char *choose_kernel_location(unsigned char *input, unsigned char *choose_kernel_location(struct boot_params *boot_params,
unsigned char *input,
unsigned long input_size, unsigned long input_size,
unsigned char *output, unsigned char *output,
unsigned long output_size) unsigned long output_size)
......
...@@ -178,7 +178,7 @@ continue_block: ...@@ -178,7 +178,7 @@ continue_block:
## 2a) PROCESS FULL BLOCKS: ## 2a) PROCESS FULL BLOCKS:
################################################################ ################################################################
full_block: full_block:
movq $128,%rax movl $128,%eax
lea 128*8*2(block_0), block_1 lea 128*8*2(block_0), block_1
lea 128*8*3(block_0), block_2 lea 128*8*3(block_0), block_2
add $128*8*1, block_0 add $128*8*1, block_0
......
...@@ -264,7 +264,7 @@ ENTRY(twofish_enc_blk) ...@@ -264,7 +264,7 @@ ENTRY(twofish_enc_blk)
movq R1, 8(%rsi) movq R1, 8(%rsi)
popq R1 popq R1
movq $1,%rax movl $1,%eax
ret ret
ENDPROC(twofish_enc_blk) ENDPROC(twofish_enc_blk)
...@@ -316,6 +316,6 @@ ENTRY(twofish_dec_blk) ...@@ -316,6 +316,6 @@ ENTRY(twofish_dec_blk)
movq R1, 8(%rsi) movq R1, 8(%rsi)
popq R1 popq R1
movq $1,%rax movl $1,%eax
ret ret
ENDPROC(twofish_dec_blk) ENDPROC(twofish_dec_blk)
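The two crypto hunks above swap movq for movl when loading small constants into %rax. On x86-64, writing the 32-bit half of a register zero-extends into the full 64-bit register, so the result is identical, while the movl encoding drops the REX.W prefix and is two bytes shorter for these immediates. A standalone check of the zero-extension behaviour (x86-64 only; the inline asm is illustrative, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long x = 0xffffffffffffffffUL;

            /* "%k0" forces the 32-bit name of the register holding x; the write
             * clears bits 63:32, leaving exactly 1 -- same as movq $1 would. */
            asm volatile("movl $1, %k0" : "+r" (x));

            printf("%lu\n", x);     /* prints 1 */
            return 0;
    }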
...@@ -3,7 +3,6 @@ ...@@ -3,7 +3,6 @@
# #
obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o
obj-$(CONFIG_IA32_EMULATION) += nosyscall.o syscall_ia32.o
obj-$(CONFIG_IA32_AOUT) += ia32_aout.o obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
......
...@@ -161,8 +161,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) ...@@ -161,8 +161,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
} }
static int ia32_restore_sigcontext(struct pt_regs *regs, static int ia32_restore_sigcontext(struct pt_regs *regs,
struct sigcontext_ia32 __user *sc, struct sigcontext_ia32 __user *sc)
unsigned int *pax)
{ {
unsigned int tmpflags, err = 0; unsigned int tmpflags, err = 0;
void __user *buf; void __user *buf;
...@@ -184,7 +183,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, ...@@ -184,7 +183,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
RELOAD_SEG(es); RELOAD_SEG(es);
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip); COPY(dx); COPY(cx); COPY(ip); COPY(ax);
/* Don't touch extended registers */ /* Don't touch extended registers */
COPY_SEG_CPL3(cs); COPY_SEG_CPL3(cs);
...@@ -197,12 +196,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, ...@@ -197,12 +196,12 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
get_user_ex(tmp, &sc->fpstate); get_user_ex(tmp, &sc->fpstate);
buf = compat_ptr(tmp); buf = compat_ptr(tmp);
get_user_ex(*pax, &sc->ax);
} get_user_catch(err); } get_user_catch(err);
err |= restore_xstate_sig(buf, 1); err |= restore_xstate_sig(buf, 1);
force_iret();
return err; return err;
} }
...@@ -211,7 +210,6 @@ asmlinkage long sys32_sigreturn(void) ...@@ -211,7 +210,6 @@ asmlinkage long sys32_sigreturn(void)
struct pt_regs *regs = current_pt_regs(); struct pt_regs *regs = current_pt_regs();
struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
sigset_t set; sigset_t set;
unsigned int ax;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe; goto badframe;
...@@ -224,9 +222,9 @@ asmlinkage long sys32_sigreturn(void) ...@@ -224,9 +222,9 @@ asmlinkage long sys32_sigreturn(void)
set_current_blocked(&set); set_current_blocked(&set);
if (ia32_restore_sigcontext(regs, &frame->sc, &ax)) if (ia32_restore_sigcontext(regs, &frame->sc))
goto badframe; goto badframe;
return ax; return regs->ax;
badframe: badframe:
signal_fault(regs, frame, "32bit sigreturn"); signal_fault(regs, frame, "32bit sigreturn");
...@@ -238,7 +236,6 @@ asmlinkage long sys32_rt_sigreturn(void) ...@@ -238,7 +236,6 @@ asmlinkage long sys32_rt_sigreturn(void)
struct pt_regs *regs = current_pt_regs(); struct pt_regs *regs = current_pt_regs();
struct rt_sigframe_ia32 __user *frame; struct rt_sigframe_ia32 __user *frame;
sigset_t set; sigset_t set;
unsigned int ax;
frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4); frame = (struct rt_sigframe_ia32 __user *)(regs->sp - 4);
...@@ -249,13 +246,13 @@ asmlinkage long sys32_rt_sigreturn(void) ...@@ -249,13 +246,13 @@ asmlinkage long sys32_rt_sigreturn(void)
set_current_blocked(&set); set_current_blocked(&set);
if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe; goto badframe;
if (compat_restore_altstack(&frame->uc.uc_stack)) if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe; goto badframe;
return ax; return regs->ax;
badframe: badframe:
signal_fault(regs, frame, "32bit rt sigreturn"); signal_fault(regs, frame, "32bit rt sigreturn");
......
#include <linux/kernel.h>
#include <linux/errno.h>
long compat_ni_syscall(void)
{
return -ENOSYS;
}
...@@ -201,20 +201,6 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, ...@@ -201,20 +201,6 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
advice); advice);
} }
long sys32_vm86_warning(void)
{
struct task_struct *me = current;
static char lastcomm[sizeof(me->comm)];
if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
compat_printk(KERN_INFO
"%s: vm86 mode not supported on 64 bit kernel\n",
me->comm);
strncpy(lastcomm, me->comm, sizeof(lastcomm));
}
return -ENOSYS;
}
asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi, asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
size_t count) size_t count)
{ {
......
/* System call table for ia32 emulation. */
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <asm/asm-offsets.h>
#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void compat(void) ;
#include <asm/syscalls_32.h>
#undef __SYSCALL_I386
#define __SYSCALL_I386(nr, sym, compat) [nr] = compat,
typedef void (*sys_call_ptr_t)(void);
extern void compat_ni_syscall(void);
const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
[0 ... __NR_ia32_syscall_max] = &compat_ni_syscall,
#include <asm/syscalls_32.h>
};
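The table initializer above leans on GCC's designated-range extension: every slot is first pointed at compat_ni_syscall, and the subsequent #include of asm/syscalls_32.h overrides the populated entries, because a later designator for the same element wins. A minimal standalone sketch of that pattern (the handler names here are made up for the demo):

    #include <stdio.h>

    typedef long (*sys_call_ptr_t)(void);

    static long nosys(void)       { return -38; }   /* stand-in for -ENOSYS */
    static long sys_example(void) { return 0; }     /* stand-in for a real entry */

    #define NR_MAX 15

    static const sys_call_ptr_t table[NR_MAX + 1] = {
            [0 ... NR_MAX] = &nosys,        /* default every slot... */
            [4]            = &sys_example,  /* ...then override a few */
    };

    int main(void)
    {
            printf("%ld %ld\n", table[3](), table[4]());    /* -38 0 */
            return 0;
    }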
...@@ -18,12 +18,63 @@ ...@@ -18,12 +18,63 @@
.endm .endm
#endif #endif
.macro altinstruction_entry orig alt feature orig_len alt_len .macro altinstruction_entry orig alt feature orig_len alt_len pad_len
.long \orig - . .long \orig - .
.long \alt - . .long \alt - .
.word \feature .word \feature
.byte \orig_len .byte \orig_len
.byte \alt_len .byte \alt_len
.byte \pad_len
.endm
.macro ALTERNATIVE oldinstr, newinstr, feature
140:
\oldinstr
141:
.skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90
142:
.pushsection .altinstructions,"a"
altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
.popsection
.pushsection .altinstr_replacement,"ax"
143:
\newinstr
144:
.popsection
.endm
#define old_len 141b-140b
#define new_len1 144f-143f
#define new_len2 145f-144f
/*
* max without conditionals. Idea adapted from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
*/
#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
140:
\oldinstr
141:
.skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
(alt_max_short(new_len1, new_len2) - (old_len)),0x90
142:
.pushsection .altinstructions,"a"
altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f,142b-141b
altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
.popsection
.pushsection .altinstr_replacement,"ax"
143:
\newinstr1
144:
\newinstr2
145:
.popsection
.endm .endm
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
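Both this assembler macro and the C-side macro in the next file compute the longer of the two replacement lengths with the branch-free max idiom credited to the Stanford bit-hacks page referenced in the comment. Spelled out in plain C as a demo of the identity, not the kernel macro itself:

    #include <assert.h>

    /* max(a, b) without a conditional: when a < b the mask is all ones and the
     * outer XOR swaps in b; otherwise the mask is zero and a passes through. */
    static long alt_max_short(long a, long b)
    {
            return a ^ ((a ^ b) & -(long)(a < b));
    }

    int main(void)
    {
            assert(alt_max_short(3, 7) == 7);
            assert(alt_max_short(7, 3) == 7);
            assert(alt_max_short(5, 5) == 5);
            return 0;
    }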
...@@ -48,8 +48,9 @@ struct alt_instr { ...@@ -48,8 +48,9 @@ struct alt_instr {
s32 repl_offset; /* offset to replacement instruction */ s32 repl_offset; /* offset to replacement instruction */
u16 cpuid; /* cpuid bit set for replacement */ u16 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */ u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */ u8 replacementlen; /* length of new instruction */
}; u8 padlen; /* length of build-time padding */
} __packed;
extern void alternative_instructions(void); extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
...@@ -76,50 +77,69 @@ static inline int alternatives_text_reserved(void *start, void *end) ...@@ -76,50 +77,69 @@ static inline int alternatives_text_reserved(void *start, void *end)
} }
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n" #define b_replacement(num) "664"#num
#define e_replacement(num) "665"#num
#define b_replacement(number) "663"#number #define alt_end_marker "663"
#define e_replacement(number) "664"#number #define alt_slen "662b-661b"
#define alt_pad_len alt_end_marker"b-662b"
#define alt_total_slen alt_end_marker"b-661b"
#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
#define alt_slen "662b-661b" #define __OLDINSTR(oldinstr, num) \
#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f" "661:\n\t" oldinstr "\n662:\n" \
".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \
"((" alt_rlen(num) ")-(" alt_slen ")),0x90\n"
#define ALTINSTR_ENTRY(feature, number) \ #define OLDINSTR(oldinstr, num) \
__OLDINSTR(oldinstr, num) \
alt_end_marker ":\n"
/*
* max without conditionals. Idea adapted from:
* http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
*
* The additional "-" is needed because gas works with s32s.
*/
#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
/*
* Pad the second replacement alternative with additional NOPs if it is
* additionally longer than the first replacement alternative.
*/
#define OLDINSTR_2(oldinstr, num1, num2) \
"661:\n\t" oldinstr "\n662:\n" \
".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \
"(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \
alt_end_marker ":\n"
#define ALTINSTR_ENTRY(feature, num) \
" .long 661b - .\n" /* label */ \ " .long 661b - .\n" /* label */ \
" .long " b_replacement(number)"f - .\n" /* new instruction */ \ " .long " b_replacement(num)"f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \ " .word " __stringify(feature) "\n" /* feature bit */ \
" .byte " alt_slen "\n" /* source len */ \ " .byte " alt_total_slen "\n" /* source len */ \
" .byte " alt_rlen(number) "\n" /* replacement len */ " .byte " alt_rlen(num) "\n" /* replacement len */ \
" .byte " alt_pad_len "\n" /* pad len */
#define DISCARD_ENTRY(number) /* rlen <= slen */ \
" .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \ #define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t" b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
/* alternative assembly primitive: */ /* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \ #define ALTERNATIVE(oldinstr, newinstr, feature) \
OLDINSTR(oldinstr) \ OLDINSTR(oldinstr, 1) \
".pushsection .altinstructions,\"a\"\n" \ ".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \ ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \ ".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".popsection" ".popsection"
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
OLDINSTR(oldinstr) \ OLDINSTR_2(oldinstr, 1, 2) \
".pushsection .altinstructions,\"a\"\n" \ ".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature1, 1) \ ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \ ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \ ".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
DISCARD_ENTRY(2) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
...@@ -146,6 +166,9 @@ static inline int alternatives_text_reserved(void *start, void *end) ...@@ -146,6 +166,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
#define alternative(oldinstr, newinstr, feature) \ #define alternative(oldinstr, newinstr, feature) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory") asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")
/* /*
* Alternative inline assembly with input. * Alternative inline assembly with input.
* *
......
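The .skip expressions in __OLDINSTR()/OLDINSTR_2() above look cryptic mainly because GNU as evaluates a true comparison to -1 rather than 1, hence the extra negation; the net effect is simply "pad the original site with max(replacement length - original length, 0) NOP (0x90) bytes at build time", so apply_alternatives() never has to grow the patched location. The same computation in C, as a small self-check (my paraphrase of the idiom, not kernel code):

    #include <assert.h>

    /* Number of 0x90 padding bytes the ".skip" idiom emits after the
     * original instruction, so the replacement always fits in place. */
    static int alt_pad_len(int srclen, int repllen)
    {
            int diff = repllen - srclen;

            return diff > 0 ? diff : 0;
    }

    int main(void)
    {
            assert(alt_pad_len(2, 5) == 3);  /* short original gets 3 NOPs */
            assert(alt_pad_len(5, 2) == 0);  /* longer original needs none */
            return 0;
    }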
...@@ -91,7 +91,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v) ...@@ -91,7 +91,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
{ {
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP, alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
ASM_OUTPUT2("=r" (v), "=m" (*addr)), ASM_OUTPUT2("=r" (v), "=m" (*addr)),
ASM_OUTPUT2("0" (v), "m" (*addr))); ASM_OUTPUT2("0" (v), "m" (*addr)));
} }
......
...@@ -95,13 +95,11 @@ do { \ ...@@ -95,13 +95,11 @@ do { \
* Stop RDTSC speculation. This is needed when you need to use RDTSC * Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined * (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region. * code region.
*
* (Could use an alternative three way for this if there was one.)
*/ */
static __always_inline void rdtsc_barrier(void) static __always_inline void rdtsc_barrier(void)
{ {
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); "lfence", X86_FEATURE_LFENCE_RDTSC);
} }
#endif /* _ASM_X86_BARRIER_H */ #endif /* _ASM_X86_BARRIER_H */
...@@ -55,143 +55,157 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -55,143 +55,157 @@ For 32-bit we have the following conventions - kernel is built with
* for assembly code: * for assembly code:
*/ */
#define R15 0 /* The layout forms the "struct pt_regs" on the stack: */
#define R14 8 /*
#define R13 16 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
#define R12 24 * unless syscall needs a complete, fully filled "struct pt_regs".
#define RBP 32 */
#define RBX 40 #define R15 0*8
#define R14 1*8
/* arguments: interrupts/non tracing syscalls only save up to here: */ #define R13 2*8
#define R11 48 #define R12 3*8
#define R10 56 #define RBP 4*8
#define R9 64 #define RBX 5*8
#define R8 72 /* These regs are callee-clobbered. Always saved on kernel entry. */
#define RAX 80 #define R11 6*8
#define RCX 88 #define R10 7*8
#define RDX 96 #define R9 8*8
#define RSI 104 #define R8 9*8
#define RDI 112 #define RAX 10*8
#define ORIG_RAX 120 /* + error_code */ #define RCX 11*8
/* end of arguments */ #define RDX 12*8
#define RSI 13*8
/* cpu exception frame or undefined in case of fast syscall: */ #define RDI 14*8
#define RIP 128 /*
#define CS 136 * On syscall entry, this is syscall#. On CPU exception, this is error code.
#define EFLAGS 144 * On hw interrupt, it's IRQ number:
#define RSP 152 */
#define SS 160 #define ORIG_RAX 15*8
/* Return frame for iretq */
#define ARGOFFSET R11 #define RIP 16*8
#define CS 17*8
.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0 #define EFLAGS 18*8
subq $9*8+\addskip, %rsp #define RSP 19*8
CFI_ADJUST_CFA_OFFSET 9*8+\addskip #define SS 20*8
movq_cfi rdi, 8*8
movq_cfi rsi, 7*8 #define SIZEOF_PTREGS 21*8
movq_cfi rdx, 6*8
.macro ALLOC_PT_GPREGS_ON_STACK addskip=0
.if \save_rcx subq $15*8+\addskip, %rsp
movq_cfi rcx, 5*8 CFI_ADJUST_CFA_OFFSET 15*8+\addskip
.endif .endm
.if \rax_enosys .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
movq $-ENOSYS, 4*8(%rsp) .if \r11
.else movq_cfi r11, 6*8+\offset
movq_cfi rax, 4*8
.endif .endif
.if \r8910
.if \save_r891011 movq_cfi r10, 7*8+\offset
movq_cfi r8, 3*8 movq_cfi r9, 8*8+\offset
movq_cfi r9, 2*8 movq_cfi r8, 9*8+\offset
movq_cfi r10, 1*8 .endif
movq_cfi r11, 0*8 .if \rax
movq_cfi rax, 10*8+\offset
.endif
.if \rcx
movq_cfi rcx, 11*8+\offset
.endif .endif
movq_cfi rdx, 12*8+\offset
movq_cfi rsi, 13*8+\offset
movq_cfi rdi, 14*8+\offset
.endm
.macro SAVE_C_REGS offset=0
SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
.endm
.macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
.endm
.macro SAVE_C_REGS_EXCEPT_R891011
SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
.endm
.macro SAVE_C_REGS_EXCEPT_RCX_R891011
SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
.endm
.macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
.endm
.macro SAVE_EXTRA_REGS offset=0
movq_cfi r15, 0*8+\offset
movq_cfi r14, 1*8+\offset
movq_cfi r13, 2*8+\offset
movq_cfi r12, 3*8+\offset
movq_cfi rbp, 4*8+\offset
movq_cfi rbx, 5*8+\offset
.endm
.macro SAVE_EXTRA_REGS_RBP offset=0
movq_cfi rbp, 4*8+\offset
.endm
.macro RESTORE_EXTRA_REGS offset=0
movq_cfi_restore 0*8+\offset, r15
movq_cfi_restore 1*8+\offset, r14
movq_cfi_restore 2*8+\offset, r13
movq_cfi_restore 3*8+\offset, r12
movq_cfi_restore 4*8+\offset, rbp
movq_cfi_restore 5*8+\offset, rbx
.endm .endm
#define ARG_SKIP (9*8) .macro ZERO_EXTRA_REGS
xorl %r15d, %r15d
xorl %r14d, %r14d
xorl %r13d, %r13d
xorl %r12d, %r12d
xorl %ebp, %ebp
xorl %ebx, %ebx
.endm
.macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \ .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
rstor_r8910=1, rstor_rdx=1
.if \rstor_r11 .if \rstor_r11
movq_cfi_restore 0*8, r11 movq_cfi_restore 6*8, r11
.endif .endif
.if \rstor_r8910 .if \rstor_r8910
movq_cfi_restore 1*8, r10 movq_cfi_restore 7*8, r10
movq_cfi_restore 2*8, r9 movq_cfi_restore 8*8, r9
movq_cfi_restore 3*8, r8 movq_cfi_restore 9*8, r8
.endif .endif
.if \rstor_rax .if \rstor_rax
movq_cfi_restore 4*8, rax movq_cfi_restore 10*8, rax
.endif .endif
.if \rstor_rcx .if \rstor_rcx
movq_cfi_restore 5*8, rcx movq_cfi_restore 11*8, rcx
.endif .endif
.if \rstor_rdx .if \rstor_rdx
movq_cfi_restore 6*8, rdx movq_cfi_restore 12*8, rdx
.endif
movq_cfi_restore 7*8, rsi
movq_cfi_restore 8*8, rdi
.if ARG_SKIP+\addskip > 0
addq $ARG_SKIP+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
.endif .endif
movq_cfi_restore 13*8, rsi
movq_cfi_restore 14*8, rdi
.endm .endm
.macro RESTORE_C_REGS
.macro LOAD_ARGS offset, skiprax=0 RESTORE_C_REGS_HELPER 1,1,1,1,1
movq \offset(%rsp), %r11
movq \offset+8(%rsp), %r10
movq \offset+16(%rsp), %r9
movq \offset+24(%rsp), %r8
movq \offset+40(%rsp), %rcx
movq \offset+48(%rsp), %rdx
movq \offset+56(%rsp), %rsi
movq \offset+64(%rsp), %rdi
.if \skiprax
.else
movq \offset+72(%rsp), %rax
.endif
.endm .endm
.macro RESTORE_C_REGS_EXCEPT_RAX
#define REST_SKIP (6*8) RESTORE_C_REGS_HELPER 0,1,1,1,1
.macro SAVE_REST
subq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET REST_SKIP
movq_cfi rbx, 5*8
movq_cfi rbp, 4*8
movq_cfi r12, 3*8
movq_cfi r13, 2*8
movq_cfi r14, 1*8
movq_cfi r15, 0*8
.endm .endm
.macro RESTORE_C_REGS_EXCEPT_RCX
.macro RESTORE_REST RESTORE_C_REGS_HELPER 1,0,1,1,1
movq_cfi_restore 0*8, r15
movq_cfi_restore 1*8, r14
movq_cfi_restore 2*8, r13
movq_cfi_restore 3*8, r12
movq_cfi_restore 4*8, rbp
movq_cfi_restore 5*8, rbx
addq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
.endm .endm
.macro RESTORE_C_REGS_EXCEPT_R11
.macro SAVE_ALL RESTORE_C_REGS_HELPER 1,1,0,1,1
SAVE_ARGS .endm
SAVE_REST .macro RESTORE_C_REGS_EXCEPT_RCX_R11
RESTORE_C_REGS_HELPER 1,0,0,1,1
.endm
.macro RESTORE_RSI_RDI
RESTORE_C_REGS_HELPER 0,0,0,0,0
.endm
.macro RESTORE_RSI_RDI_RDX
RESTORE_C_REGS_HELPER 0,0,0,0,1
.endm .endm
.macro RESTORE_ALL addskip=0 .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
RESTORE_REST addq $15*8+\addskip, %rsp
RESTORE_ARGS 1, \addskip CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
.endm .endm
.macro icebp .macro icebp
...@@ -210,37 +224,23 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -210,37 +224,23 @@ For 32-bit we have the following conventions - kernel is built with
*/ */
.macro SAVE_ALL .macro SAVE_ALL
pushl_cfi %eax pushl_cfi_reg eax
CFI_REL_OFFSET eax, 0 pushl_cfi_reg ebp
pushl_cfi %ebp pushl_cfi_reg edi
CFI_REL_OFFSET ebp, 0 pushl_cfi_reg esi
pushl_cfi %edi pushl_cfi_reg edx
CFI_REL_OFFSET edi, 0 pushl_cfi_reg ecx
pushl_cfi %esi pushl_cfi_reg ebx
CFI_REL_OFFSET esi, 0
pushl_cfi %edx
CFI_REL_OFFSET edx, 0
pushl_cfi %ecx
CFI_REL_OFFSET ecx, 0
pushl_cfi %ebx
CFI_REL_OFFSET ebx, 0
.endm .endm
.macro RESTORE_ALL .macro RESTORE_ALL
popl_cfi %ebx popl_cfi_reg ebx
CFI_RESTORE ebx popl_cfi_reg ecx
popl_cfi %ecx popl_cfi_reg edx
CFI_RESTORE ecx popl_cfi_reg esi
popl_cfi %edx popl_cfi_reg edi
CFI_RESTORE edx popl_cfi_reg ebp
popl_cfi %esi popl_cfi_reg eax
CFI_RESTORE esi
popl_cfi %edi
CFI_RESTORE edi
popl_cfi %ebp
CFI_RESTORE ebp
popl_cfi %eax
CFI_RESTORE eax
.endm .endm
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
......
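The rewritten offsets (R15 = 0*8 through SS = 20*8, SIZEOF_PTREGS = 21*8) are chosen so that the save area laid down by ALLOC_PT_GPREGS_ON_STACK and the SAVE_* helpers lines up with ORIG_RAX and the hardware iret frame above it to form one struct pt_regs on the stack, in the field order shown in the uapi ptrace.h hunk further down. A standalone layout check against a local mirror of that struct (assumes an LP64 build, i.e. 8-byte unsigned long):

    #include <stddef.h>

    /* Local mirror of the x86-64 struct pt_regs field order; not the kernel header. */
    struct ptregs_mirror {
            unsigned long r15, r14, r13, r12, bp, bx;
            unsigned long r11, r10, r9, r8, ax, cx, dx, si, di;
            unsigned long orig_ax;
            unsigned long ip, cs, flags, sp, ss;
    };

    _Static_assert(offsetof(struct ptregs_mirror, bx)      ==  5 * 8, "RBX");
    _Static_assert(offsetof(struct ptregs_mirror, di)      == 14 * 8, "RDI");
    _Static_assert(offsetof(struct ptregs_mirror, orig_ax) == 15 * 8, "ORIG_RAX");
    _Static_assert(offsetof(struct ptregs_mirror, ss)      == 20 * 8, "SS");
    _Static_assert(sizeof(struct ptregs_mirror)            == 21 * 8, "SIZEOF_PTREGS");

    int main(void) { return 0; }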
...@@ -301,7 +301,7 @@ static inline void __user *arch_compat_alloc_user_space(long len) ...@@ -301,7 +301,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
sp = task_pt_regs(current)->sp; sp = task_pt_regs(current)->sp;
} else { } else {
/* -128 for the x32 ABI redzone */ /* -128 for the x32 ABI redzone */
sp = this_cpu_read(old_rsp) - 128; sp = task_pt_regs(current)->sp - 128;
} }
return (void __user *)round_down(sp - len, 16); return (void __user *)round_down(sp - len, 16);
......
...@@ -231,7 +231,9 @@ ...@@ -231,7 +231,9 @@
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ #define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
...@@ -418,6 +420,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) ...@@ -418,6 +420,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
" .word %P0\n" /* 1: do replace */ " .word %P0\n" /* 1: do replace */
" .byte 2b - 1b\n" /* source len */ " .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */ " .byte 0\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n" ".previous\n"
/* skipping size check since replacement size = 0 */ /* skipping size check since replacement size = 0 */
: : "i" (X86_FEATURE_ALWAYS) : : t_warn); : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
...@@ -432,6 +435,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) ...@@ -432,6 +435,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
" .word %P0\n" /* feature bit */ " .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */ " .byte 2b - 1b\n" /* source len */
" .byte 0\n" /* replacement len */ " .byte 0\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n" ".previous\n"
/* skipping size check since replacement size = 0 */ /* skipping size check since replacement size = 0 */
: : "i" (bit) : : t_no); : : "i" (bit) : : t_no);
...@@ -457,6 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) ...@@ -457,6 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
" .word %P1\n" /* feature bit */ " .word %P1\n" /* feature bit */
" .byte 2b - 1b\n" /* source len */ " .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */ " .byte 4f - 3f\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n" ".previous\n"
".section .discard,\"aw\",@progbits\n" ".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
...@@ -483,31 +488,30 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) ...@@ -483,31 +488,30 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
static __always_inline __pure bool _static_cpu_has_safe(u16 bit) static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{ {
#ifdef CC_HAVE_ASM_GOTO #ifdef CC_HAVE_ASM_GOTO
/* asm_volatile_goto("1: jmp %l[t_dynamic]\n"
* We need to spell the jumps to the compiler because, depending on the offset,
* the replacement jump can be bigger than the original jump, and this we cannot
* have. Thus, we force the jump to the widest, 4-byte, signed relative
* offset even though the last would often fit in less bytes.
*/
asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
"2:\n" "2:\n"
".skip -(((5f-4f) - (2b-1b)) > 0) * "
"((5f-4f) - (2b-1b)),0x90\n"
"3:\n"
".section .altinstructions,\"a\"\n" ".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */ " .long 1b - .\n" /* src offset */
" .long 3f - .\n" /* repl offset */ " .long 4f - .\n" /* repl offset */
" .word %P1\n" /* always replace */ " .word %P1\n" /* always replace */
" .byte 2b - 1b\n" /* src len */ " .byte 3b - 1b\n" /* src len */
" .byte 4f - 3f\n" /* repl len */ " .byte 5f - 4f\n" /* repl len */
" .byte 3b - 2b\n" /* pad len */
".previous\n" ".previous\n"
".section .altinstr_replacement,\"ax\"\n" ".section .altinstr_replacement,\"ax\"\n"
"3: .byte 0xe9\n .long %l[t_no] - 2b\n" "4: jmp %l[t_no]\n"
"4:\n" "5:\n"
".previous\n" ".previous\n"
".section .altinstructions,\"a\"\n" ".section .altinstructions,\"a\"\n"
" .long 1b - .\n" /* src offset */ " .long 1b - .\n" /* src offset */
" .long 0\n" /* no replacement */ " .long 0\n" /* no replacement */
" .word %P0\n" /* feature bit */ " .word %P0\n" /* feature bit */
" .byte 2b - 1b\n" /* src len */ " .byte 3b - 1b\n" /* src len */
" .byte 0\n" /* repl len */ " .byte 0\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n" ".previous\n"
: : "i" (bit), "i" (X86_FEATURE_ALWAYS) : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
: : t_dynamic, t_no); : : t_dynamic, t_no);
...@@ -527,6 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) ...@@ -527,6 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
" .word %P2\n" /* always replace */ " .word %P2\n" /* always replace */
" .byte 2b - 1b\n" /* source len */ " .byte 2b - 1b\n" /* source len */
" .byte 4f - 3f\n" /* replacement len */ " .byte 4f - 3f\n" /* replacement len */
" .byte 0\n" /* pad len */
".previous\n" ".previous\n"
".section .discard,\"aw\",@progbits\n" ".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
...@@ -541,6 +546,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) ...@@ -541,6 +546,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
" .word %P1\n" /* feature bit */ " .word %P1\n" /* feature bit */
" .byte 4b - 3b\n" /* src len */ " .byte 4b - 3b\n" /* src len */
" .byte 6f - 5f\n" /* repl len */ " .byte 6f - 5f\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n" ".previous\n"
".section .discard,\"aw\",@progbits\n" ".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */ " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
......
...@@ -376,11 +376,16 @@ static inline void _set_gate(int gate, unsigned type, void *addr, ...@@ -376,11 +376,16 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
* Pentium F0 0F bugfix can have resulted in the mapped * Pentium F0 0F bugfix can have resulted in the mapped
* IDT being write-protected. * IDT being write-protected.
*/ */
#define set_intr_gate(n, addr) \ #define set_intr_gate_notrace(n, addr) \
do { \ do { \
BUG_ON((unsigned)n > 0xFF); \ BUG_ON((unsigned)n > 0xFF); \
_set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \ _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
__KERNEL_CS); \ __KERNEL_CS); \
} while (0)
#define set_intr_gate(n, addr) \
do { \
set_intr_gate_notrace(n, addr); \
_trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\ _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
0, 0, __KERNEL_CS); \ 0, 0, __KERNEL_CS); \
} while (0) } while (0)
......
...@@ -86,11 +86,23 @@ ...@@ -86,11 +86,23 @@
CFI_ADJUST_CFA_OFFSET 8 CFI_ADJUST_CFA_OFFSET 8
.endm .endm
.macro pushq_cfi_reg reg
pushq %\reg
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET \reg, 0
.endm
.macro popq_cfi reg .macro popq_cfi reg
popq \reg popq \reg
CFI_ADJUST_CFA_OFFSET -8 CFI_ADJUST_CFA_OFFSET -8
.endm .endm
.macro popq_cfi_reg reg
popq %\reg
CFI_ADJUST_CFA_OFFSET -8
CFI_RESTORE \reg
.endm
.macro pushfq_cfi .macro pushfq_cfi
pushfq pushfq
CFI_ADJUST_CFA_OFFSET 8 CFI_ADJUST_CFA_OFFSET 8
...@@ -116,11 +128,23 @@ ...@@ -116,11 +128,23 @@
CFI_ADJUST_CFA_OFFSET 4 CFI_ADJUST_CFA_OFFSET 4
.endm .endm
.macro pushl_cfi_reg reg
pushl %\reg
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET \reg, 0
.endm
.macro popl_cfi reg .macro popl_cfi reg
popl \reg popl \reg
CFI_ADJUST_CFA_OFFSET -4 CFI_ADJUST_CFA_OFFSET -4
.endm .endm
.macro popl_cfi_reg reg
popl %\reg
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE \reg
.endm
.macro pushfl_cfi .macro pushfl_cfi
pushfl pushfl
CFI_ADJUST_CFA_OFFSET 4 CFI_ADJUST_CFA_OFFSET 4
......
...@@ -171,10 +171,11 @@ do { \ ...@@ -171,10 +171,11 @@ do { \
static inline void elf_common_init(struct thread_struct *t, static inline void elf_common_init(struct thread_struct *t,
struct pt_regs *regs, const u16 ds) struct pt_regs *regs, const u16 ds)
{ {
regs->ax = regs->bx = regs->cx = regs->dx = 0; /* Commented-out registers are cleared in stub_execve */
regs->si = regs->di = regs->bp = 0; /*regs->ax = regs->bx =*/ regs->cx = regs->dx = 0;
regs->si = regs->di /*= regs->bp*/ = 0;
regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0; regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0; /*regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;*/
t->fs = t->gs = 0; t->fs = t->gs = 0;
t->fsindex = t->gsindex = 0; t->fsindex = t->gsindex = 0;
t->ds = t->es = ds; t->ds = t->es = ds;
......
...@@ -181,10 +181,9 @@ extern __visible void smp_call_function_single_interrupt(struct pt_regs *); ...@@ -181,10 +181,9 @@ extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
extern __visible void smp_invalidate_interrupt(struct pt_regs *); extern __visible void smp_invalidate_interrupt(struct pt_regs *);
#endif #endif
extern void (*__initconst interrupt[FIRST_SYSTEM_VECTOR extern char irq_entries_start[];
- FIRST_EXTERNAL_VECTOR])(void);
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
#define trace_interrupt interrupt #define trace_irq_entries_start irq_entries_start
#endif #endif
#define VECTOR_UNDEFINED (-1) #define VECTOR_UNDEFINED (-1)
......
...@@ -69,7 +69,7 @@ struct insn { ...@@ -69,7 +69,7 @@ struct insn {
const insn_byte_t *next_byte; const insn_byte_t *next_byte;
}; };
#define MAX_INSN_SIZE 16 #define MAX_INSN_SIZE 15
#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6) #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3) #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
......
...@@ -136,10 +136,6 @@ static inline notrace unsigned long arch_local_irq_save(void) ...@@ -136,10 +136,6 @@ static inline notrace unsigned long arch_local_irq_save(void)
#define USERGS_SYSRET32 \ #define USERGS_SYSRET32 \
swapgs; \ swapgs; \
sysretl sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32 \
swapgs; \
sti; \
sysexit
#else #else
#define INTERRUPT_RETURN iret #define INTERRUPT_RETURN iret
...@@ -163,22 +159,27 @@ static inline int arch_irqs_disabled(void) ...@@ -163,22 +159,27 @@ static inline int arch_irqs_disabled(void)
return arch_irqs_disabled_flags(flags); return arch_irqs_disabled_flags(flags);
} }
#endif /* !__ASSEMBLY__ */
#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
#else #else
# define TRACE_IRQS_ON
#ifdef CONFIG_X86_64 # define TRACE_IRQS_OFF
#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk #endif
#define ARCH_LOCKDEP_SYS_EXIT_IRQ \ #ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_X86_64
# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
# define LOCKDEP_SYS_EXIT_IRQ \
TRACE_IRQS_ON; \ TRACE_IRQS_ON; \
sti; \ sti; \
SAVE_REST; \ call lockdep_sys_exit_thunk; \
LOCKDEP_SYS_EXIT; \
RESTORE_REST; \
cli; \ cli; \
TRACE_IRQS_OFF; TRACE_IRQS_OFF;
# else
#else # define LOCKDEP_SYS_EXIT \
#define ARCH_LOCKDEP_SYS_EXIT \
pushl %eax; \ pushl %eax; \
pushl %ecx; \ pushl %ecx; \
pushl %edx; \ pushl %edx; \
...@@ -186,24 +187,12 @@ static inline int arch_irqs_disabled(void) ...@@ -186,24 +187,12 @@ static inline int arch_irqs_disabled(void)
popl %edx; \ popl %edx; \
popl %ecx; \ popl %ecx; \
popl %eax; popl %eax;
# define LOCKDEP_SYS_EXIT_IRQ
#define ARCH_LOCKDEP_SYS_EXIT_IRQ # endif
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
#else #else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
# define LOCKDEP_SYS_EXIT # define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ # define LOCKDEP_SYS_EXIT_IRQ
# endif #endif
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif #endif
...@@ -976,11 +976,6 @@ extern void default_banner(void); ...@@ -976,11 +976,6 @@ extern void default_banner(void);
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
CLBR_NONE, \ CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64)) jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#define ENABLE_INTERRUPTS_SYSEXIT32 \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif /* CONFIG_X86_32 */ #endif /* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -210,8 +210,23 @@ struct x86_hw_tss { ...@@ -210,8 +210,23 @@ struct x86_hw_tss {
unsigned long sp0; unsigned long sp0;
unsigned short ss0, __ss0h; unsigned short ss0, __ss0h;
unsigned long sp1; unsigned long sp1;
/* ss1 caches MSR_IA32_SYSENTER_CS: */
unsigned short ss1, __ss1h; /*
* We don't use ring 1, so ss1 is a convenient scratch space in
* the same cacheline as sp0. We use ss1 to cache the value in
* MSR_IA32_SYSENTER_CS. When we context switch
* MSR_IA32_SYSENTER_CS, we first check if the new value being
* written matches ss1, and, if it's not, then we wrmsr the new
* value and update ss1.
*
* The only reason we context switch MSR_IA32_SYSENTER_CS is
* that we set it to zero in vm86 tasks to avoid corrupting the
* stack if we were to go through the sysenter path from vm86
* mode.
*/
unsigned short ss1; /* MSR_IA32_SYSENTER_CS */
unsigned short __ss1h;
unsigned long sp2; unsigned long sp2;
unsigned short ss2, __ss2h; unsigned short ss2, __ss2h;
unsigned long __cr3; unsigned long __cr3;
...@@ -276,13 +291,17 @@ struct tss_struct { ...@@ -276,13 +291,17 @@ struct tss_struct {
unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
/* /*
* .. and then another 0x100 bytes for the emergency kernel stack: * Space for the temporary SYSENTER stack:
*/ */
unsigned long stack[64]; unsigned long SYSENTER_stack[64];
} ____cacheline_aligned; } ____cacheline_aligned;
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif
/* /*
* Save the original ist values for checking stack pointers during debugging * Save the original ist values for checking stack pointers during debugging
...@@ -474,7 +493,6 @@ struct thread_struct { ...@@ -474,7 +493,6 @@ struct thread_struct {
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
unsigned long sysenter_cs; unsigned long sysenter_cs;
#else #else
unsigned long usersp; /* Copy from PDA */
unsigned short es; unsigned short es;
unsigned short ds; unsigned short ds;
unsigned short fsindex; unsigned short fsindex;
...@@ -564,6 +582,16 @@ static inline void native_swapgs(void) ...@@ -564,6 +582,16 @@ static inline void native_swapgs(void)
#endif #endif
} }
static inline unsigned long current_top_of_stack(void)
{
#ifdef CONFIG_X86_64
return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
#else
/* sp0 on x86_32 is special in and around vm86 mode. */
return this_cpu_read_stable(cpu_current_top_of_stack);
#endif
}
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h> #include <asm/paravirt.h>
#else #else
...@@ -761,10 +789,10 @@ extern char ignore_fpu_irq; ...@@ -761,10 +789,10 @@ extern char ignore_fpu_irq;
#define ARCH_HAS_SPINLOCK_PREFETCH #define ARCH_HAS_SPINLOCK_PREFETCH
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
# define BASE_PREFETCH ASM_NOP4 # define BASE_PREFETCH ""
# define ARCH_HAS_PREFETCH # define ARCH_HAS_PREFETCH
#else #else
# define BASE_PREFETCH "prefetcht0 (%1)" # define BASE_PREFETCH "prefetcht0 %P1"
#endif #endif
/* /*
...@@ -775,10 +803,9 @@ extern char ignore_fpu_irq; ...@@ -775,10 +803,9 @@ extern char ignore_fpu_irq;
*/ */
static inline void prefetch(const void *x) static inline void prefetch(const void *x)
{ {
alternative_input(BASE_PREFETCH, alternative_input(BASE_PREFETCH, "prefetchnta %P1",
"prefetchnta (%1)",
X86_FEATURE_XMM, X86_FEATURE_XMM,
"r" (x)); "m" (*(const char *)x));
} }
/* /*
...@@ -788,10 +815,9 @@ static inline void prefetch(const void *x) ...@@ -788,10 +815,9 @@ static inline void prefetch(const void *x)
*/ */
static inline void prefetchw(const void *x) static inline void prefetchw(const void *x)
{ {
alternative_input(BASE_PREFETCH, alternative_input(BASE_PREFETCH, "prefetchw %P1",
"prefetchw (%1)", X86_FEATURE_3DNOWPREFETCH,
X86_FEATURE_3DNOW, "m" (*(const char *)x));
"r" (x));
} }
static inline void spin_lock_prefetch(const void *x) static inline void spin_lock_prefetch(const void *x)
...@@ -799,6 +825,9 @@ static inline void spin_lock_prefetch(const void *x) ...@@ -799,6 +825,9 @@ static inline void spin_lock_prefetch(const void *x)
prefetchw(x); prefetchw(x);
} }
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
TOP_OF_KERNEL_STACK_PADDING)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
/* /*
* User space process size: 3GB (default). * User space process size: 3GB (default).
...@@ -809,39 +838,16 @@ static inline void spin_lock_prefetch(const void *x) ...@@ -809,39 +838,16 @@ static inline void spin_lock_prefetch(const void *x)
#define STACK_TOP_MAX STACK_TOP #define STACK_TOP_MAX STACK_TOP
#define INIT_THREAD { \ #define INIT_THREAD { \
.sp0 = sizeof(init_stack) + (long)&init_stack, \ .sp0 = TOP_OF_INIT_STACK, \
.vm86_info = NULL, \ .vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \ .sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \ .io_bitmap_ptr = NULL, \
} }
/*
* Note that the .io_bitmap member must be extra-big. This is because
* the CPU will access an additional byte beyond the end of the IO
* permission bitmap. The extra byte must be all 1 bits, and must
* be within the limit.
*/
#define INIT_TSS { \
.x86_tss = { \
.sp0 = sizeof(init_stack) + (long)&init_stack, \
.ss0 = __KERNEL_DS, \
.ss1 = __KERNEL_CS, \
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
}, \
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
}
extern unsigned long thread_saved_pc(struct task_struct *tsk); extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info) \
({ \
unsigned long *__ptr = (unsigned long *)(info); \
(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})
/* /*
* The below -8 is to reserve 8 bytes on top of the ring0 stack. * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
* This is necessary to guarantee that the entire "struct pt_regs" * This is necessary to guarantee that the entire "struct pt_regs"
* is accessible even if the CPU haven't stored the SS/ESP registers * is accessible even if the CPU haven't stored the SS/ESP registers
* on the stack (interrupt gate does not save these registers * on the stack (interrupt gate does not save these registers
...@@ -850,11 +856,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); ...@@ -850,11 +856,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
* "struct pt_regs" is possible, but they may contain the * "struct pt_regs" is possible, but they may contain the
* completely wrong values. * completely wrong values.
*/ */
#define task_pt_regs(task) \ #define task_pt_regs(task) \
({ \ ({ \
struct pt_regs *__regs__; \ unsigned long __ptr = (unsigned long)task_stack_page(task); \
__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
__regs__ - 1; \ ((struct pt_regs *)__ptr) - 1; \
}) })
#define KSTK_ESP(task) (task_pt_regs(task)->sp) #define KSTK_ESP(task) (task_pt_regs(task)->sp)
...@@ -886,11 +892,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); ...@@ -886,11 +892,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define STACK_TOP_MAX TASK_SIZE_MAX #define STACK_TOP_MAX TASK_SIZE_MAX
#define INIT_THREAD { \ #define INIT_THREAD { \
.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ .sp0 = TOP_OF_INIT_STACK \
}
#define INIT_TSS { \
.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
} }
/* /*
...@@ -902,11 +904,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); ...@@ -902,11 +904,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task); extern unsigned long KSTK_ESP(struct task_struct *task);
/*
* User space RSP while inside the SYSCALL fast path
*/
DECLARE_PER_CPU(unsigned long, old_rsp);
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip, extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
......
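With INIT_TSS and KSTK_TOP gone, the 32-bit task_pt_regs() reduces to plain arithmetic: the saved register frame sits TOP_OF_KERNEL_STACK_PADDING bytes below the top of the task's stack allocation. A standalone sketch of that pointer math -- THREAD_SIZE and the padding value are assumptions for the demo (16 KiB and 0 are the usual x86-64 values; x86-32 keeps an 8- or 16-byte pad):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define THREAD_SIZE                  (16 * 1024)  /* assumed value for the demo */
    #define TOP_OF_KERNEL_STACK_PADDING  0            /* 0 on x86-64 in this series */

    struct fake_pt_regs { unsigned long reg[21]; };   /* 21*8 bytes, see calling.h */

    int main(void)
    {
            char *stack_page = malloc(THREAD_SIZE);   /* stand-in for task_stack_page() */
            uintptr_t top = (uintptr_t)stack_page + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
            struct fake_pt_regs *regs = (struct fake_pt_regs *)top - 1;

            /* The frame ends exactly at the (padded) top of the stack area. */
            printf("gap above frame: %td bytes\n",
                   (char *)stack_page + THREAD_SIZE - (char *)(regs + 1));  /* 0 */

            free(stack_page);
            return 0;
    }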
...@@ -31,13 +31,17 @@ struct pt_regs { ...@@ -31,13 +31,17 @@ struct pt_regs {
#else /* __i386__ */ #else /* __i386__ */
struct pt_regs { struct pt_regs {
/*
* C ABI says these regs are callee-preserved. They aren't saved on kernel entry
* unless syscall needs a complete, fully filled "struct pt_regs".
*/
unsigned long r15; unsigned long r15;
unsigned long r14; unsigned long r14;
unsigned long r13; unsigned long r13;
unsigned long r12; unsigned long r12;
unsigned long bp; unsigned long bp;
unsigned long bx; unsigned long bx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/ /* These regs are callee-clobbered. Always saved on kernel entry. */
unsigned long r11; unsigned long r11;
unsigned long r10; unsigned long r10;
unsigned long r9; unsigned long r9;
...@@ -47,9 +51,12 @@ struct pt_regs { ...@@ -47,9 +51,12 @@ struct pt_regs {
unsigned long dx; unsigned long dx;
unsigned long si; unsigned long si;
unsigned long di; unsigned long di;
/*
* On syscall entry, this is syscall#. On CPU exception, this is error code.
* On hw interrupt, it's IRQ number:
*/
unsigned long orig_ax; unsigned long orig_ax;
/* end of arguments */ /* Return frame for iretq */
/* cpu exception frame or undefined */
unsigned long ip; unsigned long ip;
unsigned long cs; unsigned long cs;
unsigned long flags; unsigned long flags;
...@@ -89,11 +96,13 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) ...@@ -89,11 +96,13 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
} }
/* /*
* user_mode_vm(regs) determines whether a register set came from user mode. * user_mode(regs) determines whether a register set came from user
* This is true if V8086 mode was enabled OR if the register set was from * mode. On x86_32, this is true if V8086 mode was enabled OR if the
* protected mode with RPL-3 CS value. This tricky test checks that with * register set was from protected mode with RPL-3 CS value. This
* one comparison. Many places in the kernel can bypass this full check * tricky test checks that with one comparison.
* if they have already ruled out V8086 mode, so user_mode(regs) can be used. *
* On x86_64, vm86 mode is mercifully nonexistent, and we don't need
* the extra check.
*/ */
static inline int user_mode(struct pt_regs *regs) static inline int user_mode(struct pt_regs *regs)
{ {
...@@ -104,16 +113,6 @@ static inline int user_mode(struct pt_regs *regs) ...@@ -104,16 +113,6 @@ static inline int user_mode(struct pt_regs *regs)
#endif #endif
} }
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
USER_RPL;
#else
return user_mode(regs);
#endif
}
static inline int v8086_mode(struct pt_regs *regs) static inline int v8086_mode(struct pt_regs *regs)
{ {
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
...@@ -138,12 +137,8 @@ static inline bool user_64bit_mode(struct pt_regs *regs) ...@@ -138,12 +137,8 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
#endif #endif
} }
#define current_user_stack_pointer() this_cpu_read(old_rsp) #define current_user_stack_pointer() current_pt_regs()->sp
/* ia32 vs. x32 difference */ #define compat_user_stack_pointer() current_pt_regs()->sp
#define compat_user_stack_pointer() \
(test_thread_flag(TIF_IA32) \
? current_pt_regs()->sp \
: this_cpu_read(old_rsp))
#endif #endif
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
...@@ -248,7 +243,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, ...@@ -248,7 +243,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
*/ */
#define arch_ptrace_stop_needed(code, info) \ #define arch_ptrace_stop_needed(code, info) \
({ \ ({ \
set_thread_flag(TIF_NOTIFY_RESUME); \ force_iret(); \
false; \ false; \
}) })
......
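The merged user_mode() keeps the x86-32 "tricky test" described in the comment above: OR-ing the CS RPL bits with the EFLAGS VM bit lets one comparison cover both a genuine ring-3 CS and vm86 mode, where CS holds a real-mode segment and its low bits mean nothing. The check in isolation, with sample values (constants restated here for the demo):

    #include <stdio.h>

    #define SEGMENT_RPL_MASK 0x3
    #define USER_RPL         0x3
    #define X86_VM_MASK      (1UL << 17)   /* EFLAGS.VM */

    static int came_from_user(unsigned long cs, unsigned long flags)
    {
            return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
    }

    int main(void)
    {
            printf("%d\n", came_from_user(0x73, 0));             /* user CS, RPL 3 -> 1 */
            printf("%d\n", came_from_user(0x10, 0));             /* kernel CS      -> 0 */
            printf("%d\n", came_from_user(0x1000, X86_VM_MASK)); /* vm86 segment   -> 1 */
            return 0;
    }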
...@@ -66,6 +66,11 @@ static inline void x86_ce4100_early_setup(void) { } ...@@ -66,6 +66,11 @@ static inline void x86_ce4100_early_setup(void) { }
*/ */
extern struct boot_params boot_params; extern struct boot_params boot_params;
static inline bool kaslr_enabled(void)
{
return !!(boot_params.hdr.loadflags & KASLR_FLAG);
}
/* /*
* Do NOT EVER look at the BIOS memory size location. * Do NOT EVER look at the BIOS memory size location.
* It does not work on many machines. * It does not work on many machines.
......
...@@ -57,9 +57,9 @@ struct sigcontext { ...@@ -57,9 +57,9 @@ struct sigcontext {
unsigned long ip; unsigned long ip;
unsigned long flags; unsigned long flags;
unsigned short cs; unsigned short cs;
unsigned short gs; unsigned short __pad2; /* Was called gs, but was always zero. */
unsigned short fs; unsigned short __pad1; /* Was called fs, but was always zero. */
unsigned short __pad0; unsigned short ss;
unsigned long err; unsigned long err;
unsigned long trapno; unsigned long trapno;
unsigned long oldmask; unsigned long oldmask;
......
...@@ -13,9 +13,7 @@ ...@@ -13,9 +13,7 @@
X86_EFLAGS_CF | X86_EFLAGS_RF) X86_EFLAGS_CF | X86_EFLAGS_RF)
void signal_fault(struct pt_regs *regs, void __user *frame, char *where); void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc);
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
unsigned long *pax);
int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
struct pt_regs *regs, unsigned long mask); struct pt_regs *regs, unsigned long mask);
......
...@@ -27,23 +27,11 @@ ...@@ -27,23 +27,11 @@
#ifdef CONFIG_X86_SMAP #ifdef CONFIG_X86_SMAP
#define ASM_CLAC \
	661: ASM_NOP3 ; \
	.pushsection .altinstr_replacement, "ax" ; \
	662: __ASM_CLAC ; \
	.popsection ; \
	.pushsection .altinstructions, "a" ; \
	altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
	.popsection
#define ASM_STAC \
	661: ASM_NOP3 ; \
	.pushsection .altinstr_replacement, "ax" ; \
	662: __ASM_STAC ; \
	.popsection ; \
	.pushsection .altinstructions, "a" ; \
	altinstruction_entry 661b, 662b, X86_FEATURE_SMAP, 3, 3 ; \
	.popsection
#define ASM_CLAC \
	ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
#define ASM_STAC \
	ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
#else /* CONFIG_X86_SMAP */ #else /* CONFIG_X86_SMAP */
...@@ -61,20 +49,20 @@ ...@@ -61,20 +49,20 @@
static __always_inline void clac(void) static __always_inline void clac(void)
{ {
/* Note: a barrier is implicit in alternative() */ /* Note: a barrier is implicit in alternative() */
alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP); alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
} }
static __always_inline void stac(void) static __always_inline void stac(void)
{ {
/* Note: a barrier is implicit in alternative() */ /* Note: a barrier is implicit in alternative() */
alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP); alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
} }
/* These macros can be used in asm() statements */ /* These macros can be used in asm() statements */
#define ASM_CLAC \ #define ASM_CLAC \
ALTERNATIVE(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP) ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
#define ASM_STAC \ #define ASM_STAC \
ALTERNATIVE(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP) ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
#else /* CONFIG_X86_SMAP */ #else /* CONFIG_X86_SMAP */
......
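As a side note on how the C-level SMAP helpers above end up being used, here is a hedged illustration (the function is hypothetical and deliberately omits page-fault handling): stac()/clac() bracket a deliberate user-memory access, and both calls patch down to nothing on CPUs without X86_FEATURE_SMAP.

static inline unsigned char peek_user_byte_sketch(const unsigned char __user *p)
{
	unsigned char v;

	stac();					/* allow user accesses under SMAP */
	v = *(const unsigned char __force *)p;	/* illustrative only, may fault */
	clac();					/* forbid them again */

	return v;
}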
...@@ -154,6 +154,7 @@ void cpu_die_common(unsigned int cpu); ...@@ -154,6 +154,7 @@ void cpu_die_common(unsigned int cpu);
void native_smp_prepare_boot_cpu(void); void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus); void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus); void native_smp_cpus_done(unsigned int max_cpus);
void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle); int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void); int native_cpu_disable(void);
void native_cpu_die(unsigned int cpu); void native_cpu_die(unsigned int cpu);
......
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <asm/nops.h>
static inline void native_clts(void) static inline void native_clts(void)
{ {
asm volatile("clts"); asm volatile("clts");
...@@ -199,6 +201,28 @@ static inline void clflushopt(volatile void *__p) ...@@ -199,6 +201,28 @@ static inline void clflushopt(volatile void *__p)
"+m" (*(volatile char __force *)__p)); "+m" (*(volatile char __force *)__p));
} }
static inline void clwb(volatile void *__p)
{
volatile struct { char x[64]; } *p = __p;
asm volatile(ALTERNATIVE_2(
".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
X86_FEATURE_CLFLUSHOPT,
".byte 0x66, 0x0f, 0xae, 0x30", /* clwb (%%rax) */
X86_FEATURE_CLWB)
: [p] "+m" (*p)
: [pax] "a" (p));
}
static inline void pcommit_sfence(void)
{
alternative(ASM_NOP7,
".byte 0x66, 0x0f, 0xae, 0xf8\n\t" /* pcommit */
"sfence",
X86_FEATURE_PCOMMIT);
}
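To make the intended calling pattern concrete, here is a hedged sketch (the helper name is made up, a 64-byte cache line is assumed, and feature checks are left to the caller): write back each cache line of a buffer with clwb(), then order the write-backs with pcommit_sfence().

static inline void wb_buffer_to_pmem_sketch(void *addr, size_t size)
{
	unsigned long p = (unsigned long)addr & ~63UL;	/* align to line start */
	unsigned long end = (unsigned long)addr + size;

	for (; p < end; p += 64)
		clwb((void *)p);	/* write back, keep the line cached */

	pcommit_sfence();		/* order the write-backs */
}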
#define nop() asm volatile ("nop") #define nop() asm volatile ("nop")
......
...@@ -12,6 +12,33 @@ ...@@ -12,6 +12,33 @@
#include <asm/percpu.h> #include <asm/percpu.h>
#include <asm/types.h> #include <asm/types.h>
/*
* TOP_OF_KERNEL_STACK_PADDING is a number of unused bytes that we
* reserve at the top of the kernel stack. We do it because of a nasty
* 32-bit corner case. On x86_32, the hardware stack frame is
* variable-length. Except for vm86 mode, struct pt_regs assumes a
* maximum-length frame. If we enter from CPL 0, the top 8 bytes of
* pt_regs don't actually exist. Ordinarily this doesn't matter, but it
* does in at least one case:
*
* If we take an NMI early enough in SYSENTER, then we can end up with
* pt_regs that extends above sp0. On the way out, in the espfix code,
* we can read the saved SS value, but that value will be above sp0.
* Without this offset, that can result in a page fault. (We are
* careful that, in this case, the value we read doesn't matter.)
*
* In vm86 mode, the hardware frame is much longer still, but we neither
* access the extra members from NMI context, nor do we write such a
* frame at sp0 at all.
*
* x86_64 has a fixed-length stack frame.
*/
#ifdef CONFIG_X86_32
# define TOP_OF_KERNEL_STACK_PADDING 8
#else
# define TOP_OF_KERNEL_STACK_PADDING 0
#endif
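A sketch of how such padding is typically consumed when locating the saved registers, assuming the usual task_stack_page()/THREAD_SIZE layout (the macro name is illustrative, not part of this hunk): the hardware frame, and therefore pt_regs, ends TOP_OF_KERNEL_STACK_PADDING bytes below the top of the stack page.

#define task_pt_regs_sketch(task)					\
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})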
/* /*
* low level task data that entry.S needs immediate access to * low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line * - this struct should fit entirely inside of one cache line
...@@ -145,7 +172,6 @@ struct thread_info { ...@@ -145,7 +172,6 @@ struct thread_info {
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
#define STACK_WARN (THREAD_SIZE/8) #define STACK_WARN (THREAD_SIZE/8)
#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
/* /*
* macros/functions for gaining access to the thread information structure * macros/functions for gaining access to the thread information structure
...@@ -158,10 +184,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack); ...@@ -158,10 +184,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
static inline struct thread_info *current_thread_info(void) static inline struct thread_info *current_thread_info(void)
{ {
struct thread_info *ti; return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
ti = (void *)(this_cpu_read_stable(kernel_stack) +
KERNEL_STACK_OFFSET - THREAD_SIZE);
return ti;
} }
static inline unsigned long current_stack_pointer(void) static inline unsigned long current_stack_pointer(void)
...@@ -177,16 +200,37 @@ static inline unsigned long current_stack_pointer(void) ...@@ -177,16 +200,37 @@ static inline unsigned long current_stack_pointer(void)
#else /* !__ASSEMBLY__ */ #else /* !__ASSEMBLY__ */
/* how to get the thread information struct from ASM */ /* Load thread_info address into "reg" */
#define GET_THREAD_INFO(reg) \ #define GET_THREAD_INFO(reg) \
_ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \ _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
_ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ; _ASM_SUB $(THREAD_SIZE),reg ;
/* /*
* Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in * ASM operand which evaluates to a 'thread_info' address of
* a certain register (to be used in assembler memory operands). * the current task, if it is known that "reg" is exactly "off"
* bytes below the top of the stack currently.
*
* ( The kernel stack's size is known at build time, it is usually
* 2 or 4 pages, and the bottom of the kernel stack contains
* the thread_info structure. So to access the thread_info very
* quickly from assembly code we can calculate down from the
* top of the kernel stack to the bottom, using constant,
* build-time calculations only. )
*
* For example, to fetch the current thread_info->flags value into %eax
* on x86-64 defconfig kernels, in syscall entry code where RSP is
* currently at exactly SIZEOF_PTREGS bytes away from the top of the
* stack:
*
* mov ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS), %eax
*
* will translate to:
*
* 8b 84 24 b8 c0 ff ff mov -0x3f48(%rsp), %eax
*
* which is below the current RSP by almost 16K.
*/ */
#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg) #define ASM_THREAD_INFO(field, reg, off) ((field)+(off)-THREAD_SIZE)(reg)
#endif #endif
...@@ -236,6 +280,16 @@ static inline bool is_ia32_task(void) ...@@ -236,6 +280,16 @@ static inline bool is_ia32_task(void)
#endif #endif
return false; return false;
} }
/*
* Force syscall return via IRET by making it look as if there was
* some work pending. IRET is our most capable (but slowest) syscall
* return path, which is able to restore modified SS, CS and certain
* EFLAGS values that other (fast) syscall return instructions
* are not able to restore properly.
*/
#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
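A hedged illustration of when this matters (the function below is hypothetical): code that edits selectors or IRET-only EFLAGS bits in pt_regs must not let the fast SYSRET/SYSEXIT path skip reloading them.

static void set_user_cs_sketch(struct pt_regs *regs, unsigned short new_cs)
{
	regs->cs = new_cs;	/* a fast return path would not reload this */
	force_iret();		/* so make this return go through IRET */
}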
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
/* loadflags */ /* loadflags */
#define LOADED_HIGH (1<<0) #define LOADED_HIGH (1<<0)
#define KASLR_FLAG (1<<1)
#define QUIET_FLAG (1<<5) #define QUIET_FLAG (1<<5)
#define KEEP_SEGMENTS (1<<6) #define KEEP_SEGMENTS (1<<6)
#define CAN_USE_HEAP (1<<7) #define CAN_USE_HEAP (1<<7)
......
...@@ -25,13 +25,17 @@ ...@@ -25,13 +25,17 @@
#else /* __i386__ */ #else /* __i386__ */
#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS) #if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
/*
* C ABI says these regs are callee-preserved. They aren't saved on kernel entry
* unless syscall needs a complete, fully filled "struct pt_regs".
*/
#define R15 0 #define R15 0
#define R14 8 #define R14 8
#define R13 16 #define R13 16
#define R12 24 #define R12 24
#define RBP 32 #define RBP 32
#define RBX 40 #define RBX 40
/* arguments: interrupts/non tracing syscalls only save up to here*/ /* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11 48 #define R11 48
#define R10 56 #define R10 56
#define R9 64 #define R9 64
...@@ -41,15 +45,17 @@ ...@@ -41,15 +45,17 @@
#define RDX 96 #define RDX 96
#define RSI 104 #define RSI 104
#define RDI 112 #define RDI 112
#define ORIG_RAX 120       /* = ERROR */
/* end of arguments */
/* cpu exception frame or undefined in case of fast syscall. */
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
#define ORIG_RAX 120
/* Return frame for iretq */
#define RIP 128 #define RIP 128
#define CS 136 #define CS 136
#define EFLAGS 144 #define EFLAGS 144
#define RSP 152 #define RSP 152
#define SS 160 #define SS 160
#define ARGOFFSET R11
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
/* top of stack page */ /* top of stack page */
......
...@@ -41,13 +41,17 @@ struct pt_regs { ...@@ -41,13 +41,17 @@ struct pt_regs {
#ifndef __KERNEL__ #ifndef __KERNEL__
struct pt_regs { struct pt_regs {
/*
* C ABI says these regs are callee-preserved. They aren't saved on kernel entry
* unless syscall needs a complete, fully filled "struct pt_regs".
*/
unsigned long r15; unsigned long r15;
unsigned long r14; unsigned long r14;
unsigned long r13; unsigned long r13;
unsigned long r12; unsigned long r12;
unsigned long rbp; unsigned long rbp;
unsigned long rbx; unsigned long rbx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/ /* These regs are callee-clobbered. Always saved on kernel entry. */
unsigned long r11; unsigned long r11;
unsigned long r10; unsigned long r10;
unsigned long r9; unsigned long r9;
...@@ -57,9 +61,12 @@ struct pt_regs { ...@@ -57,9 +61,12 @@ struct pt_regs {
unsigned long rdx; unsigned long rdx;
unsigned long rsi; unsigned long rsi;
unsigned long rdi; unsigned long rdi;
/*
* On syscall entry, this is syscall#. On CPU exception, this is error code.
* On hw interrupt, it's IRQ number:
*/
unsigned long orig_rax; unsigned long orig_rax;
/* end of arguments */ /* Return frame for iretq */
/* cpu exception frame or undefined */
unsigned long rip; unsigned long rip;
unsigned long cs; unsigned long cs;
unsigned long eflags; unsigned long eflags;
......
...@@ -177,9 +177,24 @@ struct sigcontext { ...@@ -177,9 +177,24 @@ struct sigcontext {
__u64 rip; __u64 rip;
__u64 eflags; /* RFLAGS */ __u64 eflags; /* RFLAGS */
__u16 cs; __u16 cs;
	__u16 gs;
	__u16 fs;
	__u16 __pad0;
	/*
	 * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
* Linux saved and restored fs and gs in these slots. This
* was counterproductive, as fsbase and gsbase were never
* saved, so arch_prctl was presumably unreliable.
*
* If these slots are ever needed for any other purpose, there
* is some risk that very old 64-bit binaries could get
* confused. I doubt that many such binaries still work,
* though, since the same patch in 2.5.64 also removed the
* 64-bit set_thread_area syscall, so it appears that there is
* no TLS API that works in both pre- and post-2.5.64 kernels.
*/
__u16 __pad2; /* Was gs. */
__u16 __pad1; /* Was fs. */
__u16 ss;
__u64 err; __u64 err;
__u64 trapno; __u64 trapno;
__u64 oldmask; __u64 oldmask;
......
...@@ -32,6 +32,7 @@ obj-$(CONFIG_X86_32) += i386_ksyms_32.o ...@@ -32,6 +32,7 @@ obj-$(CONFIG_X86_32) += i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += mcount_64.o obj-$(CONFIG_X86_64) += mcount_64.o
obj-y += syscall_$(BITS).o vsyscall_gtod.o obj-y += syscall_$(BITS).o vsyscall_gtod.o
obj-$(CONFIG_IA32_EMULATION) += syscall_32.o
obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o obj-$(CONFIG_X86_VSYSCALL_EMULATION) += vsyscall_64.o vsyscall_emu_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o obj-$(CONFIG_SYSFS) += ksysfs.o
......
...@@ -52,10 +52,25 @@ static int __init setup_noreplace_paravirt(char *str) ...@@ -52,10 +52,25 @@ static int __init setup_noreplace_paravirt(char *str)
__setup("noreplace-paravirt", setup_noreplace_paravirt); __setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif #endif
#define DPRINTK(fmt, ...) \ #define DPRINTK(fmt, args...) \
do { \ do { \
if (debug_alternative) \ if (debug_alternative) \
printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args); \
} while (0)
#define DUMP_BYTES(buf, len, fmt, args...) \
do { \
if (unlikely(debug_alternative)) { \
int j; \
\
if (!(len)) \
break; \
\
printk(KERN_DEBUG fmt, ##args); \
for (j = 0; j < (len) - 1; j++) \
printk(KERN_CONT "%02hhx ", buf[j]); \
printk(KERN_CONT "%02hhx\n", buf[j]); \
} \
} while (0) } while (0)
/* /*
...@@ -243,12 +258,89 @@ extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; ...@@ -243,12 +258,89 @@ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[]; extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len); void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have less capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement.
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
u8 *next_rip, *tgt_rip;
s32 n_dspl, o_dspl;
int repl_len;
if (a->replacementlen != 5)
return;
o_dspl = *(s32 *)(insnbuf + 1);
/* next_rip of the replacement JMP */
next_rip = repl_insn + a->replacementlen;
/* target rip of the replacement JMP */
tgt_rip = next_rip + o_dspl;
n_dspl = tgt_rip - orig_insn;
DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);
if (tgt_rip - orig_insn >= 0) {
if (n_dspl - 2 <= 127)
goto two_byte_jmp;
else
goto five_byte_jmp;
/* negative offset */
} else {
if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
goto two_byte_jmp;
else
goto five_byte_jmp;
}
two_byte_jmp:
n_dspl -= 2;
insnbuf[0] = 0xeb;
insnbuf[1] = (s8)n_dspl;
add_nops(insnbuf + 2, 3);
repl_len = 2;
goto done;
five_byte_jmp:
n_dspl -= 5;
insnbuf[0] = 0xe9;
*(s32 *)&insnbuf[1] = n_dspl;
repl_len = 5;
done:
DPRINTK("final displ: 0x%08x, JMP 0x%lx",
n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
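A worked example with made-up addresses may help follow the displacement math above.

/*
 * Worked example (made-up addresses): orig_insn = 0x1e00, repl_insn = 0x2000,
 * and the replacement JMP targets 0x1e25.  Then next_rip = 0x2005,
 * o_dspl = -0x1e0 and n_dspl = 0x1e25 - 0x1e00 = 0x25.  Since 0x25 - 2 = 0x23
 * fits in a signed byte, insnbuf is rewritten as "eb 23" plus a 3-byte NOP,
 * and the short JMP patched at 0x1e00 still lands on 0x1e25.
 */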
static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
if (instr[0] != 0x90)
return;
add_nops(instr + (a->instrlen - a->padlen), a->padlen);
DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
instr, a->instrlen - a->padlen, a->padlen);
}
/*
* Replace instructions with better alternatives for this CPU type. This runs
* before SMP is initialized to avoid SMP problems with self modifying code.
* This implies that asymmetric systems where APs have less capabilities than
* the boot processor are not handled. Tough. Make sure you disable such
* features by hand.
*/
void __init_or_module apply_alternatives(struct alt_instr *start, void __init_or_module apply_alternatives(struct alt_instr *start,
struct alt_instr *end) struct alt_instr *end)
{ {
...@@ -256,10 +348,10 @@ void __init_or_module apply_alternatives(struct alt_instr *start, ...@@ -256,10 +348,10 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
u8 *instr, *replacement; u8 *instr, *replacement;
u8 insnbuf[MAX_PATCH_LEN]; u8 insnbuf[MAX_PATCH_LEN];
DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); DPRINTK("alt table %p -> %p", start, end);
/* /*
* The scan order should be from start to end. A later scanned * The scan order should be from start to end. A later scanned
* alternative code can overwrite a previous scanned alternative code. * alternative code can overwrite previously scanned alternative code.
* Some kernel functions (e.g. memcpy, memset, etc) use this order to * Some kernel functions (e.g. memcpy, memset, etc) use this order to
* patch code. * patch code.
* *
...@@ -267,29 +359,54 @@ void __init_or_module apply_alternatives(struct alt_instr *start, ...@@ -267,29 +359,54 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
* order. * order.
*/ */
for (a = start; a < end; a++) { for (a = start; a < end; a++) {
int insnbuf_sz = 0;
instr = (u8 *)&a->instr_offset + a->instr_offset; instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset; replacement = (u8 *)&a->repl_offset + a->repl_offset;
BUG_ON(a->replacementlen > a->instrlen);
BUG_ON(a->instrlen > sizeof(insnbuf)); BUG_ON(a->instrlen > sizeof(insnbuf));
BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
if (!boot_cpu_has(a->cpuid)) if (!boot_cpu_has(a->cpuid)) {
if (a->padlen > 1)
optimize_nops(a, instr);
continue; continue;
}
DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
a->cpuid >> 5,
a->cpuid & 0x1f,
instr, a->instrlen,
replacement, a->replacementlen, a->padlen);
DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
memcpy(insnbuf, replacement, a->replacementlen); memcpy(insnbuf, replacement, a->replacementlen);
insnbuf_sz = a->replacementlen;
/* 0xe8 is a relative jump; fix the offset. */ /* 0xe8 is a relative jump; fix the offset. */
if (*insnbuf == 0xe8 && a->replacementlen == 5) if (*insnbuf == 0xe8 && a->replacementlen == 5) {
*(s32 *)(insnbuf + 1) += replacement - instr; *(s32 *)(insnbuf + 1) += replacement - instr;
DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
*(s32 *)(insnbuf + 1),
(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
}
if (a->replacementlen && is_jmp(replacement[0]))
recompute_jump(a, instr, replacement, insnbuf);
add_nops(insnbuf + a->replacementlen, if (a->instrlen > a->replacementlen) {
a->instrlen - a->replacementlen); add_nops(insnbuf + a->replacementlen,
a->instrlen - a->replacementlen);
insnbuf_sz += a->instrlen - a->replacementlen;
}
DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
text_poke_early(instr, insnbuf, a->instrlen); text_poke_early(instr, insnbuf, insnbuf_sz);
} }
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end, static void alternatives_smp_lock(const s32 *start, const s32 *end,
u8 *text, u8 *text_end) u8 *text, u8 *text_end)
{ {
...@@ -371,8 +488,8 @@ void __init_or_module alternatives_smp_module_add(struct module *mod, ...@@ -371,8 +488,8 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
smp->locks_end = locks_end; smp->locks_end = locks_end;
smp->text = text; smp->text = text;
smp->text_end = text_end; smp->text_end = text_end;
DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n", DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
__func__, smp->locks, smp->locks_end, smp->locks, smp->locks_end,
smp->text, smp->text_end, smp->name); smp->text, smp->text_end, smp->name);
list_add_tail(&smp->next, &smp_alt_modules); list_add_tail(&smp->next, &smp_alt_modules);
...@@ -440,7 +557,7 @@ int alternatives_text_reserved(void *start, void *end) ...@@ -440,7 +557,7 @@ int alternatives_text_reserved(void *start, void *end)
return 0; return 0;
} }
#endif #endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start, void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
...@@ -601,7 +718,7 @@ int poke_int3_handler(struct pt_regs *regs) ...@@ -601,7 +718,7 @@ int poke_int3_handler(struct pt_regs *regs)
if (likely(!bp_patching_in_progress)) if (likely(!bp_patching_in_progress))
return 0; return 0;
if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr) if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
return 0; return 0;
/* set up the specified breakpoint handler */ /* set up the specified breakpoint handler */
......
...@@ -68,7 +68,7 @@ void foo(void) ...@@ -68,7 +68,7 @@ void foo(void)
/* Offset from the sysenter stack to tss.sp0 */ /* Offset from the sysenter stack to tss.sp0 */
DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) - DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
sizeof(struct tss_struct)); offsetofend(struct tss_struct, SYSENTER_stack));
#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE) #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
BLANK(); BLANK();
......
...@@ -81,6 +81,7 @@ int main(void) ...@@ -81,6 +81,7 @@ int main(void)
#undef ENTRY #undef ENTRY
OFFSET(TSS_ist, tss_struct, x86_tss.ist); OFFSET(TSS_ist, tss_struct, x86_tss.ist);
OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
BLANK(); BLANK();
DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1); DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
......
...@@ -711,6 +711,11 @@ static void init_amd(struct cpuinfo_x86 *c) ...@@ -711,6 +711,11 @@ static void init_amd(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
/* 3DNow or LM implies PREFETCHW */
if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
} }
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
......
...@@ -959,38 +959,37 @@ static void identify_cpu(struct cpuinfo_x86 *c) ...@@ -959,38 +959,37 @@ static void identify_cpu(struct cpuinfo_x86 *c)
#endif #endif
} }
#ifdef CONFIG_X86_64
#ifdef CONFIG_IA32_EMULATION
/* May not be __init: called during resume */
static void syscall32_cpu_init(void)
{
	/* Load these always in case some future AMD CPU supports
	   SYSENTER from compat mode too. */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

	wrmsrl(MSR_CSTAR, ia32_cstar_target);
}
#endif		/* CONFIG_IA32_EMULATION */
#endif		/* CONFIG_X86_64 */

#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	int cpu = get_cpu();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	if (!boot_cpu_has(X86_FEATURE_SEP)) {
		put_cpu();
		return;
	}

	tss->x86_tss.ss1 = __KERNEL_CS;
	tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
	put_cpu();
}
#endif
/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	cpu = get_cpu();
	tss = &per_cpu(cpu_tss, cpu);

	if (!boot_cpu_has(X86_FEATURE_SEP))
		goto out;

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */
	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);

	wrmsr(MSR_IA32_SYSENTER_ESP,
	      (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
	      0);

	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0);

out:
	put_cpu();
}
#endif
...@@ -1118,7 +1117,7 @@ static __init int setup_disablecpuid(char *arg) ...@@ -1118,7 +1117,7 @@ static __init int setup_disablecpuid(char *arg)
__setup("clearcpuid=", setup_disablecpuid); __setup("clearcpuid=", setup_disablecpuid);
DEFINE_PER_CPU(unsigned long, kernel_stack) = DEFINE_PER_CPU(unsigned long, kernel_stack) =
(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; (unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack); EXPORT_PER_CPU_SYMBOL(kernel_stack);
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
...@@ -1130,8 +1129,8 @@ DEFINE_PER_CPU_FIRST(union irq_stack_union, ...@@ -1130,8 +1129,8 @@ DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE) __visible; irq_stack_union) __aligned(PAGE_SIZE) __visible;
/* /*
* The following four percpu variables are hot. Align current_task to * The following percpu variables are hot. Align current_task to
* cacheline size such that all four fall in the same cacheline. * cacheline size such that they fall in the same cacheline.
*/ */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
&init_task; &init_task;
...@@ -1171,10 +1170,23 @@ void syscall_init(void) ...@@ -1171,10 +1170,23 @@ void syscall_init(void)
*/ */
wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
wrmsrl(MSR_LSTAR, system_call); wrmsrl(MSR_LSTAR, system_call);
wrmsrl(MSR_CSTAR, ignore_sysret);
#ifdef CONFIG_IA32_EMULATION #ifdef CONFIG_IA32_EMULATION
syscall32_cpu_init(); wrmsrl(MSR_CSTAR, ia32_cstar_target);
/*
* This only works on Intel CPUs.
* On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
* This does not cause SYSENTER to jump to the wrong location, because
* AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
*/
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
#else
wrmsrl(MSR_CSTAR, ignore_sysret);
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif #endif
/* Flags to clear on syscall */ /* Flags to clear on syscall */
...@@ -1226,6 +1238,15 @@ DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; ...@@ -1226,6 +1238,15 @@ DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count); EXPORT_PER_CPU_SYMBOL(__preempt_count);
DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
/*
* On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
* the top of the kernel stack. Use an extra percpu variable to track the
* top of the kernel stack directly.
*/
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
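For orientation, a sketch of the reader side, consistent with how this series uses the variable but written here only as an assumption (in particular, the 64-bit branch reading sp0 out of cpu_tss is assumed):

static inline unsigned long current_top_of_stack_sketch(void)
{
#ifdef CONFIG_X86_32
	/* vm86 can scribble on sp0, so 32-bit keeps a dedicated variable */
	return this_cpu_read_stable(cpu_current_top_of_stack);
#else
	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
#endif
}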
#ifdef CONFIG_CC_STACKPROTECTOR #ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif #endif
...@@ -1307,7 +1328,7 @@ void cpu_init(void) ...@@ -1307,7 +1328,7 @@ void cpu_init(void)
*/ */
load_ucode_ap(); load_ucode_ap();
t = &per_cpu(init_tss, cpu); t = &per_cpu(cpu_tss, cpu);
oist = &per_cpu(orig_ist, cpu); oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
...@@ -1391,7 +1412,7 @@ void cpu_init(void) ...@@ -1391,7 +1412,7 @@ void cpu_init(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct task_struct *curr = current; struct task_struct *curr = current;
struct tss_struct *t = &per_cpu(init_tss, cpu); struct tss_struct *t = &per_cpu(cpu_tss, cpu);
struct thread_struct *thread = &curr->thread; struct thread_struct *thread = &curr->thread;
wait_for_master_cpu(cpu); wait_for_master_cpu(cpu);
......
...@@ -2146,6 +2146,12 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) ...@@ -2146,6 +2146,12 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
*/ */
static unsigned long code_segment_base(struct pt_regs *regs) static unsigned long code_segment_base(struct pt_regs *regs)
{ {
/*
* For IA32 we look at the GDT/LDT segment base to convert the
* effective IP to a linear address.
*/
#ifdef CONFIG_X86_32
/* /*
* If we are in VM86 mode, add the segment offset to convert to a * If we are in VM86 mode, add the segment offset to convert to a
* linear address. * linear address.
...@@ -2153,18 +2159,12 @@ static unsigned long code_segment_base(struct pt_regs *regs) ...@@ -2153,18 +2159,12 @@ static unsigned long code_segment_base(struct pt_regs *regs)
if (regs->flags & X86_VM_MASK) if (regs->flags & X86_VM_MASK)
return 0x10 * regs->cs; return 0x10 * regs->cs;
/*
* For IA32 we look at the GDT/LDT segment base to convert the
* effective IP to a linear address.
*/
#ifdef CONFIG_X86_32
if (user_mode(regs) && regs->cs != __USER_CS) if (user_mode(regs) && regs->cs != __USER_CS)
return get_segment_base(regs->cs); return get_segment_base(regs->cs);
#else #else
if (test_thread_flag(TIF_IA32)) { if (user_mode(regs) && !user_64bit_mode(regs) &&
if (user_mode(regs) && regs->cs != __USER32_CS) regs->cs != __USER32_CS)
return get_segment_base(regs->cs); return get_segment_base(regs->cs);
}
#endif #endif
return 0; return 0;
} }
......
...@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs) ...@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
struct pt_regs fixed_regs; struct pt_regs fixed_regs;
if (!user_mode_vm(regs)) { if (!user_mode(regs)) {
crash_fixup_ss_esp(&fixed_regs, regs); crash_fixup_ss_esp(&fixed_regs, regs);
regs = &fixed_regs; regs = &fixed_regs;
} }
......
...@@ -278,7 +278,7 @@ int __die(const char *str, struct pt_regs *regs, long err) ...@@ -278,7 +278,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
print_modules(); print_modules();
show_regs(regs); show_regs(regs);
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
if (user_mode_vm(regs)) { if (user_mode(regs)) {
sp = regs->sp; sp = regs->sp;
ss = regs->ss & 0xffff; ss = regs->ss & 0xffff;
} else { } else {
...@@ -307,7 +307,7 @@ void die(const char *str, struct pt_regs *regs, long err) ...@@ -307,7 +307,7 @@ void die(const char *str, struct pt_regs *regs, long err)
unsigned long flags = oops_begin(); unsigned long flags = oops_begin();
int sig = SIGSEGV; int sig = SIGSEGV;
if (!user_mode_vm(regs)) if (!user_mode(regs))
report_bug(regs->ip, regs); report_bug(regs->ip, regs);
if (__die(str, regs, err)) if (__die(str, regs, err))
......
...@@ -123,13 +123,13 @@ void show_regs(struct pt_regs *regs) ...@@ -123,13 +123,13 @@ void show_regs(struct pt_regs *regs)
int i; int i;
show_regs_print_info(KERN_EMERG); show_regs_print_info(KERN_EMERG);
__show_regs(regs, !user_mode_vm(regs)); __show_regs(regs, !user_mode(regs));
/* /*
* When in-kernel, we also print out the stack and code at the * When in-kernel, we also print out the stack and code at the
* time of the fault.. * time of the fault..
*/ */
if (!user_mode_vm(regs)) { if (!user_mode(regs)) {
unsigned int code_prologue = code_bytes * 43 / 64; unsigned int code_prologue = code_bytes * 43 / 64;
unsigned int code_len = code_bytes; unsigned int code_len = code_bytes;
unsigned char c; unsigned char c;
......
...@@ -395,10 +395,13 @@ sysenter_past_esp: ...@@ -395,10 +395,13 @@ sysenter_past_esp:
/*CFI_REL_OFFSET cs, 0*/ /*CFI_REL_OFFSET cs, 0*/
/* /*
* Push current_thread_info()->sysenter_return to the stack. * Push current_thread_info()->sysenter_return to the stack.
* A tiny bit of offset fixup is necessary - 4*4 means the 4 words * A tiny bit of offset fixup is necessary: TI_sysenter_return
* pushed above; +8 corresponds to copy_thread's esp0 setting. * is relative to thread_info, which is at the bottom of the
* kernel stack page. 4*4 means the 4 words pushed above;
* TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
* and THREAD_SIZE takes us to the bottom.
*/ */
pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp) pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
CFI_REL_OFFSET eip, 0 CFI_REL_OFFSET eip, 0
pushl_cfi %eax pushl_cfi %eax
...@@ -432,7 +435,7 @@ sysenter_after_call: ...@@ -432,7 +435,7 @@ sysenter_after_call:
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx testl $_TIF_ALLWORK_MASK, %ecx
jne sysexit_audit jnz sysexit_audit
sysenter_exit: sysenter_exit:
/* if something modifies registers it must also disable sysexit */ /* if something modifies registers it must also disable sysexit */
movl PT_EIP(%esp), %edx movl PT_EIP(%esp), %edx
...@@ -460,7 +463,7 @@ sysenter_audit: ...@@ -460,7 +463,7 @@ sysenter_audit:
sysexit_audit: sysexit_audit:
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
jne syscall_exit_work jnz syscall_exit_work
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY) ENABLE_INTERRUPTS(CLBR_ANY)
movl %eax,%edx /* second arg, syscall return value */ movl %eax,%edx /* second arg, syscall return value */
...@@ -472,7 +475,7 @@ sysexit_audit: ...@@ -472,7 +475,7 @@ sysexit_audit:
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx movl TI_flags(%ebp), %ecx
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
jne syscall_exit_work jnz syscall_exit_work
movl PT_EAX(%esp),%eax /* reload syscall return value */ movl PT_EAX(%esp),%eax /* reload syscall return value */
jmp sysenter_exit jmp sysenter_exit
#endif #endif
...@@ -510,7 +513,7 @@ syscall_exit: ...@@ -510,7 +513,7 @@ syscall_exit:
TRACE_IRQS_OFF TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx # current->work testl $_TIF_ALLWORK_MASK, %ecx # current->work
jne syscall_exit_work jnz syscall_exit_work
restore_all: restore_all:
TRACE_IRQS_IRET TRACE_IRQS_IRET
...@@ -612,7 +615,7 @@ work_notifysig: # deal with pending signals and ...@@ -612,7 +615,7 @@ work_notifysig: # deal with pending signals and
#ifdef CONFIG_VM86 #ifdef CONFIG_VM86
testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
movl %esp, %eax movl %esp, %eax
jne work_notifysig_v86 # returning to kernel-space or jnz work_notifysig_v86 # returning to kernel-space or
# vm86-space # vm86-space
1: 1:
#else #else
...@@ -720,43 +723,22 @@ END(sysenter_badsys) ...@@ -720,43 +723,22 @@ END(sysenter_badsys)
.endm .endm
/* /*
* Build the entry stubs and pointer table with some assembler magic. * Build the entry stubs with some assembler magic.
* We pack 7 stubs into a single 32-byte chunk, which will fit in a * We pack 1 stub into every 8-byte block.
* single cache line on all modern x86 implementations.
*/ */
.section .init.rodata,"a" .align 8
ENTRY(interrupt)
.section .entry.text, "ax"
.p2align 5
.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start) ENTRY(irq_entries_start)
RING0_INT_FRAME RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
.balign 32 pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
.rept 7 vector=vector+1
.if vector < FIRST_SYSTEM_VECTOR jmp common_interrupt
.if vector <> FIRST_EXTERNAL_VECTOR
CFI_ADJUST_CFA_OFFSET -4 CFI_ADJUST_CFA_OFFSET -4
.endif .align 8
1: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */ .endr
.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
jmp 2f
.endif
.previous
.long 1b
.section .entry.text, "ax"
vector=vector+1
.endif
.endr
2: jmp common_interrupt
.endr
END(irq_entries_start) END(irq_entries_start)
.previous
END(interrupt)
.previous
/* /*
* the CPU automatically disables interrupts when executing an IRQ vector, * the CPU automatically disables interrupts when executing an IRQ vector,
* so IRQ-flags tracing has to follow that: * so IRQ-flags tracing has to follow that:
...@@ -816,15 +798,9 @@ ENTRY(simd_coprocessor_error) ...@@ -816,15 +798,9 @@ ENTRY(simd_coprocessor_error)
pushl_cfi $0 pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG #ifdef CONFIG_X86_INVD_BUG
/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661: pushl_cfi $do_general_protection ALTERNATIVE "pushl_cfi $do_general_protection", \
662: "pushl $do_simd_coprocessor_error", \
.section .altinstructions,"a" X86_FEATURE_XMM
altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.previous
.section .altinstr_replacement,"ax"
663: pushl $do_simd_coprocessor_error
664:
.previous
#else #else
pushl_cfi $do_simd_coprocessor_error pushl_cfi $do_simd_coprocessor_error
#endif #endif
...@@ -1240,20 +1216,13 @@ error_code: ...@@ -1240,20 +1216,13 @@ error_code:
/*CFI_REL_OFFSET es, 0*/ /*CFI_REL_OFFSET es, 0*/
pushl_cfi %ds pushl_cfi %ds
/*CFI_REL_OFFSET ds, 0*/ /*CFI_REL_OFFSET ds, 0*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	pushl_cfi_reg eax
	pushl_cfi_reg ebp
	pushl_cfi_reg edi
	pushl_cfi_reg esi
	pushl_cfi_reg edx
	pushl_cfi_reg ecx
	pushl_cfi_reg ebx
cld cld
movl $(__KERNEL_PERCPU), %ecx movl $(__KERNEL_PERCPU), %ecx
movl %ecx, %fs movl %ecx, %fs
......
This diff is collapsed.
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/percpu.h> #include <asm/percpu.h>
#include <asm/nops.h> #include <asm/nops.h>
#include <asm/bootparam.h>
/* Physical address */ /* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET) #define pa(X) ((X) - __PAGE_OFFSET)
...@@ -90,7 +91,7 @@ ENTRY(startup_32) ...@@ -90,7 +91,7 @@ ENTRY(startup_32)
/* test KEEP_SEGMENTS flag to see if the bootloader is asking /* test KEEP_SEGMENTS flag to see if the bootloader is asking
us to not reload segments */ us to not reload segments */
testb $(1<<6), BP_loadflags(%esi) testb $KEEP_SEGMENTS, BP_loadflags(%esi)
jnz 2f jnz 2f
/* /*
......
/* /*
* linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit * linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
* *
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
* Copyright (C) 2000 Pavel Machek <pavel@suse.cz> * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
...@@ -56,7 +56,7 @@ startup_64: ...@@ -56,7 +56,7 @@ startup_64:
* %rsi holds a physical pointer to real_mode_data. * %rsi holds a physical pointer to real_mode_data.
* *
* We come here either directly from a 64bit bootloader, or from * We come here either directly from a 64bit bootloader, or from
* arch/x86_64/boot/compressed/head.S. * arch/x86/boot/compressed/head_64.S.
* *
* We only come here initially at boot nothing else comes here. * We only come here initially at boot nothing else comes here.
* *
...@@ -146,7 +146,7 @@ startup_64: ...@@ -146,7 +146,7 @@ startup_64:
leaq level2_kernel_pgt(%rip), %rdi leaq level2_kernel_pgt(%rip), %rdi
leaq 4096(%rdi), %r8 leaq 4096(%rdi), %r8
/* See if it is a valid page table entry */ /* See if it is a valid page table entry */
1: testq $1, 0(%rdi) 1: testb $1, 0(%rdi)
jz 2f jz 2f
addq %rbp, 0(%rdi) addq %rbp, 0(%rdi)
/* Go to the next page */ /* Go to the next page */
......
...@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void) ...@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
static inline bool interrupted_user_mode(void) static inline bool interrupted_user_mode(void)
{ {
struct pt_regs *regs = get_irq_regs(); struct pt_regs *regs = get_irq_regs();
return regs && user_mode_vm(regs); return regs && user_mode(regs);
} }
/* /*
......
...@@ -54,7 +54,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) ...@@ -54,7 +54,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
* because the ->io_bitmap_max value must match the bitmap * because the ->io_bitmap_max value must match the bitmap
* contents: * contents:
*/ */
tss = &per_cpu(init_tss, get_cpu()); tss = &per_cpu(cpu_tss, get_cpu());
if (turn_on) if (turn_on)
bitmap_clear(t->io_bitmap_ptr, from, num); bitmap_clear(t->io_bitmap_ptr, from, num);
......
...@@ -165,7 +165,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) ...@@ -165,7 +165,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
if (unlikely(!desc)) if (unlikely(!desc))
return false; return false;
if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) { if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
if (unlikely(overflow)) if (unlikely(overflow))
print_stack_overflow(); print_stack_overflow();
desc->handle_irq(irq, desc); desc->handle_irq(irq, desc);
......
...@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs) ...@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
u64 estack_top, estack_bottom; u64 estack_top, estack_bottom;
u64 curbase = (u64)task_stack_page(current); u64 curbase = (u64)task_stack_page(current);
if (user_mode_vm(regs)) if (user_mode(regs))
return; return;
if (regs->sp >= curbase + sizeof(struct thread_info) + if (regs->sp >= curbase + sizeof(struct thread_info) +
......
...@@ -178,7 +178,8 @@ void __init native_init_IRQ(void) ...@@ -178,7 +178,8 @@ void __init native_init_IRQ(void)
#endif #endif
for_each_clear_bit_from(i, used_vectors, first_system_vector) { for_each_clear_bit_from(i, used_vectors, first_system_vector) {
/* IA32_SYSCALL_VECTOR could be used in trap_init already. */ /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]); set_intr_gate(i, irq_entries_start +
8 * (i - FIRST_EXTERNAL_VECTOR));
} }
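With every stub padded to 8 bytes, the entry point for a vector is plain arithmetic on the irq_entries_start label rather than a lookup in the old interrupt[] pointer table. A hedged sketch (helper name invented, extern declaration assumed):

extern char irq_entries_start[];

static inline void *irq_stub_for_vector_sketch(int vector)
{
	return irq_entries_start + 8 * (vector - FIRST_EXTERNAL_VECTOR);
}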
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
for_each_clear_bit_from(i, used_vectors, NR_VECTORS) for_each_clear_bit_from(i, used_vectors, NR_VECTORS)
......
...@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) ...@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
switch (regno) { switch (regno) {
case GDB_SS: case GDB_SS:
if (!user_mode_vm(regs)) if (!user_mode(regs))
*(unsigned long *)mem = __KERNEL_DS; *(unsigned long *)mem = __KERNEL_DS;
break; break;
case GDB_SP: case GDB_SP:
if (!user_mode_vm(regs)) if (!user_mode(regs))
*(unsigned long *)mem = kernel_stack_pointer(regs); *(unsigned long *)mem = kernel_stack_pointer(regs);
break; break;
case GDB_GS: case GDB_GS:
......
...@@ -602,7 +602,7 @@ int kprobe_int3_handler(struct pt_regs *regs) ...@@ -602,7 +602,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
struct kprobe *p; struct kprobe *p;
struct kprobe_ctlblk *kcb; struct kprobe_ctlblk *kcb;
if (user_mode_vm(regs)) if (user_mode(regs))
return 0; return 0;
addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
...@@ -1007,7 +1007,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, ...@@ -1007,7 +1007,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
struct die_args *args = data; struct die_args *args = data;
int ret = NOTIFY_DONE; int ret = NOTIFY_DONE;
if (args->regs && user_mode_vm(args->regs)) if (args->regs && user_mode(args->regs))
return ret; return ret;
if (val == DIE_GPF) { if (val == DIE_GPF) {
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/setup.h>
#if 0 #if 0
#define DEBUGP(fmt, ...) \ #define DEBUGP(fmt, ...) \
...@@ -47,21 +48,13 @@ do { \ ...@@ -47,21 +48,13 @@ do { \
#ifdef CONFIG_RANDOMIZE_BASE #ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset; static unsigned long module_load_offset;
static int randomize_modules = 1;
/* Mutex protects the module_load_offset. */ /* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex); static DEFINE_MUTEX(module_kaslr_mutex);
static int __init parse_nokaslr(char *p)
{
randomize_modules = 0;
return 0;
}
early_param("nokaslr", parse_nokaslr);
static unsigned long int get_module_load_offset(void) static unsigned long int get_module_load_offset(void)
{ {
if (randomize_modules) { if (kaslr_enabled()) {
mutex_lock(&module_kaslr_mutex); mutex_lock(&module_kaslr_mutex);
/* /*
* Calculate the module_load_offset the first time this * Calculate the module_load_offset the first time this
......
...@@ -131,10 +131,11 @@ void perf_get_regs_user(struct perf_regs *regs_user, ...@@ -131,10 +131,11 @@ void perf_get_regs_user(struct perf_regs *regs_user,
} }
/* /*
* RIP, flags, and the argument registers are usually saved. * These registers are always saved on 64-bit syscall entry.
* orig_ax is probably okay, too. * On 32-bit entry points, they are saved too except r8..r11.
*/ */
regs_user_copy->ip = user_regs->ip; regs_user_copy->ip = user_regs->ip;
regs_user_copy->ax = user_regs->ax;
regs_user_copy->cx = user_regs->cx; regs_user_copy->cx = user_regs->cx;
regs_user_copy->dx = user_regs->dx; regs_user_copy->dx = user_regs->dx;
regs_user_copy->si = user_regs->si; regs_user_copy->si = user_regs->si;
...@@ -145,9 +146,12 @@ void perf_get_regs_user(struct perf_regs *regs_user, ...@@ -145,9 +146,12 @@ void perf_get_regs_user(struct perf_regs *regs_user,
regs_user_copy->r11 = user_regs->r11; regs_user_copy->r11 = user_regs->r11;
regs_user_copy->orig_ax = user_regs->orig_ax; regs_user_copy->orig_ax = user_regs->orig_ax;
regs_user_copy->flags = user_regs->flags; regs_user_copy->flags = user_regs->flags;
regs_user_copy->sp = user_regs->sp;
regs_user_copy->cs = user_regs->cs;
regs_user_copy->ss = user_regs->ss;
/* /*
* Don't even try to report the "rest" regs. * Most system calls don't save these registers, don't report them.
*/ */
regs_user_copy->bx = -1; regs_user_copy->bx = -1;
regs_user_copy->bp = -1; regs_user_copy->bp = -1;
...@@ -158,37 +162,13 @@ void perf_get_regs_user(struct perf_regs *regs_user, ...@@ -158,37 +162,13 @@ void perf_get_regs_user(struct perf_regs *regs_user,
/* /*
* For this to be at all useful, we need a reasonable guess for * For this to be at all useful, we need a reasonable guess for
* sp and the ABI. Be careful: we're in NMI context, and we're * the ABI. Be careful: we're in NMI context, and we're
* considering current to be the current task, so we should * considering current to be the current task, so we should
* be careful not to look at any other percpu variables that might * be careful not to look at any other percpu variables that might
* change during context switches. * change during context switches.
*/ */
if (IS_ENABLED(CONFIG_IA32_EMULATION) && regs_user->abi = user_64bit_mode(user_regs) ?
task_thread_info(current)->status & TS_COMPAT) { PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
/* Easy case: we're in a compat syscall. */
regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
regs_user_copy->sp = user_regs->sp;
regs_user_copy->cs = user_regs->cs;
regs_user_copy->ss = user_regs->ss;
} else if (user_regs->orig_ax != -1) {
/*
* We're probably in a 64-bit syscall.
* Warning: this code is severely racy. At least it's better
* than just blindly copying user_regs.
*/
regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
regs_user_copy->sp = this_cpu_read(old_rsp);
regs_user_copy->cs = __USER_CS;
regs_user_copy->ss = __USER_DS;
regs_user_copy->cx = -1; /* usually contains garbage */
} else {
/* We're probably in an interrupt or exception. */
regs_user->abi = user_64bit_mode(user_regs) ?
PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
regs_user_copy->sp = user_regs->sp;
regs_user_copy->cs = user_regs->cs;
regs_user_copy->ss = user_regs->ss;
}
regs_user->regs = regs_user_copy; regs_user->regs = regs_user_copy;
} }
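The ABI guess now leans entirely on user_64bit_mode(). For reference, on a native 64-bit kernel that check reduces to a CS selector comparison; a sketch under that assumption (paravirt guests use a different selector check):

static inline bool user_64bit_mode_sketch(struct pt_regs *regs)
{
	return regs->cs == __USER_CS;	/* native case only; Xen differs */
}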
......
This diff is collapsed.
...@@ -832,10 +832,15 @@ static void __init trim_low_memory_range(void) ...@@ -832,10 +832,15 @@ static void __init trim_low_memory_range(void)
static int static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{ {
pr_emerg("Kernel Offset: 0x%lx from 0x%lx " if (kaslr_enabled()) {
"(relocation range: 0x%lx-0x%lx)\n", pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
(unsigned long)&_text - __START_KERNEL, __START_KERNEL, (unsigned long)&_text - __START_KERNEL,
__START_KERNEL_map, MODULES_VADDR-1); __START_KERNEL,
__START_KERNEL_map,
MODULES_VADDR-1);
} else {
pr_emerg("Kernel Offset: disabled\n");
}
return 0; return 0;
} }
......
This diff is collapsed.