Commit fdf5563a authored by Linus Torvalds

Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Ingo Molnar:
 "This topic tree contains more commits than usual:

   - most of it are uaccess cleanups/reorganization by Al

   - there's a bunch of prototype declaration (-Wmissing-prototypes)
     cleanups

   - misc other cleanups all around the map"

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  x86/mm/set_memory: Fix -Wmissing-prototypes warnings
  x86/efi: Add a prototype for efi_arch_mem_reserve()
  x86/mm: Mark setup_emu2phys_nid() static
  x86/jump_label: Move 'inline' keyword placement
  x86/platform/uv: Add a missing prototype for uv_bau_message_interrupt()
  kill uaccess_try()
  x86: unsafe_put-style macro for sigmask
  x86: x32_setup_rt_frame(): consolidate uaccess areas
  x86: __setup_rt_frame(): consolidate uaccess areas
  x86: __setup_frame(): consolidate uaccess areas
  x86: setup_sigcontext(): lift user_access_{begin,end}() into the callers
  x86: get rid of put_user_try in __setup_rt_frame() (both 32bit and 64bit)
  x86: ia32_setup_rt_frame(): consolidate uaccess areas
  x86: ia32_setup_frame(): consolidate uaccess areas
  x86: ia32_setup_sigcontext(): lift user_access_{begin,end}() into the callers
  x86/alternatives: Mark text_poke_loc_init() static
  x86/cpu: Fix a -Wmissing-prototypes warning for init_ia32_feat_ctl()
  x86/mm: Drop pud_mknotpresent()
  x86: Replace setup_irq() by request_irq()
  x86/configs: Slightly reduce defconfigs
  ...
parents 97cddfc3 a2150327
@@ -340,12 +340,6 @@ pointer which points to one of:
entry->insn. It is used to distinguish page faults from machine
check.
3) ``int ex_handler_ext(const struct exception_table_entry *fixup)``
This case is used for uaccess_err ... we need to set a flag
in the task structure. Before the handler functions existed this
case was handled by adding a large offset to the fixup to tag
it as special.
More functions can easily be added.
CONFIG_BUILDTIME_TABLE_SORT allows the __ex_table section to be sorted post
......
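The uaccess_err flag and the ex_handler_ext case removed above are superseded in this series by user_access_begin()/unsafe_put_user()/user_access_end() with an explicit fault label (see the vm86 and signal changes below). A minimal sketch of that pattern follows; the struct, field, and function names are placeholders for illustration, not taken from any one commit:

static int copy_out_sketch(struct some_user_struct __user *uptr, u32 val1, u32 val2)
{
	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_put_user(val1, &uptr->field1, Efault);	/* field1/field2 are illustrative */
	unsafe_put_user(val2, &uptr->field2, Efault);
	user_access_end();
	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}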
@@ -125,7 +125,6 @@ CONFIG_IP6_NF_MANGLE=y
CONFIG_NET_SCHED=y
CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
CONFIG_HAMRADIO=y
CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_MAC80211_LEDS=y
@@ -171,7 +170,6 @@ CONFIG_FORCEDETH=y
CONFIG_8139TOO=y
# CONFIG_8139TOO_PIO is not set
CONFIG_R8169=y
CONFIG_FDDI=y
CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
......
@@ -123,7 +123,6 @@ CONFIG_IP6_NF_MANGLE=y
CONFIG_NET_SCHED=y
CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
CONFIG_HAMRADIO=y
CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_MAC80211_LEDS=y
@@ -164,7 +163,6 @@ CONFIG_SKY2=y
CONFIG_FORCEDETH=y
CONFIG_8139TOO=y
CONFIG_R8169=y
CONFIG_FDDI=y
CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
......
@@ -34,6 +34,7 @@
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/syscall.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
......
@@ -2490,7 +2490,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
/* 32-bit process in 64-bit kernel. */
unsigned long ss_base, cs_base;
struct stack_frame_ia32 frame;
const void __user *fp;
const struct stack_frame_ia32 __user *fp;
if (!test_thread_flag(TIF_IA32))
return 0;
@@ -2501,18 +2501,12 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
fp = compat_ptr(ss_base + regs->bp);
pagefault_disable();
while (entry->nr < entry->max_stack) {
unsigned long bytes;
frame.next_frame = 0;
frame.return_address = 0;
if (!valid_user_frame(fp, sizeof(frame)))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
if (bytes != 0)
if (__get_user(frame.next_frame, &fp->next_frame))
break;
bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
if (bytes != 0)
if (__get_user(frame.return_address, &fp->return_address))
break;
perf_callchain_store(entry, cs_base + frame.return_address);
@@ -2533,7 +2527,7 @@ void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct stack_frame frame;
const unsigned long __user *fp;
const struct stack_frame __user *fp;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
@@ -2546,7 +2540,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
return;
fp = (unsigned long __user *)regs->bp;
fp = (void __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
@@ -2558,19 +2552,12 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
pagefault_disable();
while (entry->nr < entry->max_stack) {
unsigned long bytes;
frame.next_frame = NULL;
frame.return_address = 0;
if (!valid_user_frame(fp, sizeof(frame)))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
if (bytes != 0)
if (__get_user(frame.next_frame, &fp->next_frame))
break;
bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
if (bytes != 0)
if (__get_user(frame.return_address, &fp->return_address))
break;
perf_callchain_store(entry, frame.return_address);
......
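Taken together, the perf changes above replace the open-coded __copy_from_user_nmi() calls with typed __get_user() reads of each stack_frame field. A condensed sketch of the resulting 64-bit walker, omitting the valid_user_frame() and guest checks shown in the diff:

static void walk_user_stack_sketch(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	const struct stack_frame __user *fp = (void __user *)regs->bp;
	struct stack_frame frame;

	pagefault_disable();
	while (entry->nr < entry->max_stack) {
		/* stop on the first frame that faults or is malformed */
		if (__get_user(frame.next_frame, &fp->next_frame) ||
		    __get_user(frame.return_address, &fp->return_address))
			break;
		perf_callchain_store(entry, frame.return_address);
		fp = (void __user *)frame.next_frame;
	}
	pagefault_enable();
}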
@@ -138,9 +138,6 @@
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
# define _ASM_EXTABLE_EX(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
# define _ASM_NOKPROBE(entry) \
.pushsection "_kprobe_blacklist","aw" ; \
_ASM_ALIGN ; \
@@ -166,9 +163,6 @@
# define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
# define _ASM_EXTABLE_EX(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
......
@@ -23,6 +23,8 @@
#define MWAITX_MAX_LOOPS ((u32)-1)
#define MWAITX_DISABLE_CSTATES 0xf0
u32 get_umwait_control_msr(void);
static inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
{
......
@@ -595,12 +595,6 @@ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
__pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}
static inline pud_t pud_mknotpresent(pud_t pud)
{
return pfn_pud(pud_pfn(pud),
__pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
......
@@ -542,7 +542,6 @@ struct thread_struct {
mm_segment_t addr_limit;
unsigned int sig_on_uaccess_err:1;
unsigned int uaccess_err:1; /* uaccess failed */
/* Floating point and extended processor state */
struct fpu fpu;
......
@@ -46,6 +46,8 @@ int set_memory_4k(unsigned long addr, int numpages);
int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
int set_memory_np_noalias(unsigned long addr, int numpages);
int set_memory_nonglobal(unsigned long addr, int numpages);
int set_memory_global(unsigned long addr, int numpages);
int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
......
@@ -33,11 +33,7 @@ struct sigframe_ia32 {
* legacy application accessing/modifying it.
*/
struct _fpstate_32 fpstate_unused;
#ifdef CONFIG_IA32_EMULATION
unsigned int extramask[_COMPAT_NSIG_WORDS-1];
#else /* !CONFIG_IA32_EMULATION */
unsigned long extramask[_NSIG_WORDS-1];
#endif /* CONFIG_IA32_EMULATION */
unsigned int extramask[1];
char retcode[8];
/* fp state follows here */
};
......
@@ -14,7 +14,5 @@
X86_EFLAGS_CF | X86_EFLAGS_RF)
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
struct pt_regs *regs, unsigned long mask);
#endif /* _ASM_X86_SIGHANDLING_H */
@@ -159,6 +159,11 @@ static inline int syscall_get_arch(struct task_struct *task)
task->thread_info.status & TS_COMPAT)
? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
}
void do_syscall_64(unsigned long nr, struct pt_regs *regs);
void do_int80_syscall_32(struct pt_regs *regs);
long do_fast_syscall_32(struct pt_regs *regs);
#endif /* CONFIG_X86_32 */
#endif /* _ASM_X86_SYSCALL_H */
@@ -193,23 +193,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
: : "A" (x), "r" (addr) \
: : label)
#define __put_user_asm_ex_u64(x, addr) \
asm volatile("\n" \
"1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
"3:" \
_ASM_EXTABLE_EX(1b, 2b) \
_ASM_EXTABLE_EX(2b, 3b) \
: : "A" (x), "r" (addr))
#define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr) \
__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
@@ -289,31 +278,6 @@ do { \
} \
} while (0)
/*
* This doesn't do __uaccess_begin/end - the exception handling
* around it must do that.
*/
#define __put_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__put_user_asm_ex(x, ptr, "b", "b", "iq"); \
break; \
case 2: \
__put_user_asm_ex(x, ptr, "w", "w", "ir"); \
break; \
case 4: \
__put_user_asm_ex(x, ptr, "l", "k", "ir"); \
break; \
case 8: \
__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
break; \
default: \
__put_user_bad(); \
} \
} while (0)
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) \
({ \
@@ -335,12 +299,9 @@ do { \
"i" (errret), "0" (retval)); \
})
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif
#define __get_user_size(x, ptr, size, retval, errret) \
@@ -378,53 +339,6 @@ do { \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
asm volatile("\n" \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE_UA(1b, 3b) \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
/*
* This doesn't do __uaccess_begin/end - the exception handling
* around it must do that.
*/
#define __get_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_asm_ex(x, ptr, "b", "b", "=q"); \
break; \
case 2: \
__get_user_asm_ex(x, ptr, "w", "w", "=r"); \
break; \
case 4: \
__get_user_asm_ex(x, ptr, "l", "k", "=r"); \
break; \
case 8: \
__get_user_asm_ex_u64(x, ptr); \
break; \
default: \
(x) = __get_user_bad(); \
} \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3:xor"itype" %"rtype"0,%"rtype"0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE_EX(1b, 3b) \
: ltype(x) : "m" (__m(addr)))
#define __put_user_nocheck(x, ptr, size) \
({ \
__label__ __pu_label; \
@@ -480,29 +394,6 @@ struct __large_struct { unsigned long buf[100]; };
retval = __put_user_failed(x, addr, itype, rtype, ltype, errret); \
} while (0)
#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %"rtype"0,%1\n" \
"2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
: : ltype(x), "m" (__m(addr)))
/*
* uaccess_try and catch
*/
#define uaccess_try do { \
current->thread.uaccess_err = 0; \
__uaccess_begin(); \
barrier();
#define uaccess_try_nospec do { \
current->thread.uaccess_err = 0; \
__uaccess_begin_nospec(); \
#define uaccess_catch(err) \
__uaccess_end(); \
(err) |= (current->thread.uaccess_err ? -EFAULT : 0); \
} while (0)
/**
* __get_user - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
@@ -552,28 +443,6 @@ struct __large_struct { unsigned long buf[100]; };
#define __put_user(x, ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
/*
* {get|put}_user_try and catch
*
* get_user_try {
* get_user_ex(...);
* } get_user_catch(err)
*/
#define get_user_try uaccess_try_nospec
#define get_user_catch(err) uaccess_catch(err)
#define get_user_ex(x, ptr) do { \
unsigned long __gue_val; \
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
(x) = (__force __typeof__(*(ptr)))__gue_val; \
} while (0)
#define put_user_try uaccess_try
#define put_user_catch(err) uaccess_catch(err)
#define put_user_ex(x, ptr) \
__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
@@ -601,15 +470,6 @@ extern struct movsl_mask {
# include <asm/uaccess_64.h>
#endif
/*
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
* nested NMI paths are careful to preserve CR2.
*
* Caller must use pagefault_enable/disable, or run in interrupt context,
* and also do a uaccess_ok() check
*/
#define __copy_from_user_nmi __copy_from_user_inatomic
/*
* The "unsafe" user accesses aren't really "unsafe", but the naming
* is a big fat warning: you have to not only do the access_ok()
......
@@ -23,33 +23,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
static __always_inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
unsigned long ret;
switch (n) {
case 1:
ret = 0;
__uaccess_begin_nospec();
__get_user_asm_nozero(*(u8 *)to, from, ret,
"b", "b", "=q", 1);
__uaccess_end();
return ret;
case 2:
ret = 0;
__uaccess_begin_nospec();
__get_user_asm_nozero(*(u16 *)to, from, ret,
"w", "w", "=r", 2);
__uaccess_end();
return ret;
case 4:
ret = 0;
__uaccess_begin_nospec();
__get_user_asm_nozero(*(u32 *)to, from, ret,
"l", "k", "=r", 4);
__uaccess_end();
return ret;
}
}
return __copy_user_ll(to, (__force const void *)from, n);
}
......
@@ -65,117 +65,13 @@ copy_to_user_mcsafe(void *to, const void *from, unsigned len)
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
int ret = 0;
return copy_user_generic(dst, (__force void *)src, size);
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
case 1:
__uaccess_begin_nospec();
__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
ret, "b", "b", "=q", 1);
__uaccess_end();
return ret;
case 2:
__uaccess_begin_nospec();
__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
ret, "w", "w", "=r", 2);
__uaccess_end();
return ret;
case 4:
__uaccess_begin_nospec();
__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
ret, "l", "k", "=r", 4);
__uaccess_end();
return ret;
case 8:
__uaccess_begin_nospec();
__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 8);
__uaccess_end();
return ret;
case 10:
__uaccess_begin_nospec();
__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 10);
if (likely(!ret))
__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
(u16 __user *)(8 + (char __user *)src),
ret, "w", "w", "=r", 2);
__uaccess_end();
return ret;
case 16:
__uaccess_begin_nospec();
__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 16);
if (likely(!ret))
__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
(u64 __user *)(8 + (char __user *)src),
ret, "q", "", "=r", 8);
__uaccess_end();
return ret;
default:
return copy_user_generic(dst, (__force void *)src, size);
}
}
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
int ret = 0;
return copy_user_generic((__force void *)dst, src, size);
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
case 1:
__uaccess_begin();
__put_user_asm(*(u8 *)src, (u8 __user *)dst,
ret, "b", "b", "iq", 1);
__uaccess_end();
return ret;
case 2:
__uaccess_begin();
__put_user_asm(*(u16 *)src, (u16 __user *)dst,
ret, "w", "w", "ir", 2);
__uaccess_end();
return ret;
case 4:
__uaccess_begin();
__put_user_asm(*(u32 *)src, (u32 __user *)dst,
ret, "l", "k", "ir", 4);
__uaccess_end();
return ret;
case 8:
__uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 8);
__uaccess_end();
return ret;
case 10:
__uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 10);
if (likely(!ret)) {
asm("":::"memory");
__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
ret, "w", "w", "ir", 2);
}
__uaccess_end();
return ret;
case 16:
__uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 16);
if (likely(!ret)) {
asm("":::"memory");
__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
ret, "q", "", "er", 8);
}
__uaccess_end();
return ret;
default:
return copy_user_generic((__force void *)dst, src, size);
}
}
static __always_inline __must_check
......
@@ -858,4 +858,6 @@ static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
return 1;
}
void uv_bau_message_interrupt(struct pt_regs *regs);
#endif /* _ASM_X86_UV_UV_BAU_H */
@@ -1167,8 +1167,8 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
atomic_cond_read_acquire(&desc.refs, !VAL);
}
void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
const void *opcode, size_t len, const void *emulate)
{
struct insn insn;
......
@@ -5,6 +5,7 @@
#include <asm/msr-index.h>
#include <asm/processor.h>
#include <asm/vmx.h>
#include "cpu.h"
#undef pr_fmt
#define pr_fmt(fmt) "x86/cpu: " fmt
......
@@ -4,6 +4,7 @@
#include <linux/cpu.h>
#include <asm/msr.h>
#include <asm/mwait.h>
#define UMWAIT_C02_ENABLE 0
......
@@ -13,6 +13,7 @@
#include <asm/io_bitmap.h>
#include <asm/desc.h>
#include <asm/syscalls.h>
#ifdef CONFIG_X86_IOPL_IOPERM
......
@@ -44,15 +44,6 @@
* (these are usually mapped into the 0x30-0xff vector range)
*/
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[0 ... NR_VECTORS - 1] = VECTOR_UNUSED,
};
@@ -84,7 +75,7 @@ void __init init_IRQ(void)
* On cpu 0, Assign ISA_IRQ_VECTOR(irq) to IRQ 0..15.
* If these IRQ's are handled by legacy interrupt-controllers like PIC,
* then this configuration will likely be static after the boot. If
* these IRQ's are handled by more mordern controllers like IO-APIC,
* these IRQs are handled by more modern controllers like IO-APIC,
* then this vector space can be freed and re-used dynamically as the
* irq's migrate etc.
*/
@@ -104,6 +95,9 @@ void __init native_init_IRQ(void)
idt_setup_apic_and_irq_gates();
lapic_assign_system_vectors();
if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
setup_irq(2, &irq2);
if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs()) {
/* IRQ2 is cascade interrupt to second interrupt controller */
if (request_irq(2, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("%s: request_irq() failed\n", "cascade");
}
}
@@ -58,7 +58,7 @@ __jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type,
return code;
}
static void inline __jump_label_transform(struct jump_entry *entry,
static inline void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
int init)
{
......
@@ -403,9 +403,9 @@ static void default_do_nmi(struct pt_regs *regs)
* a 'real' unknown NMI. For example, while processing
* a perf NMI another perf NMI comes in along with a
* 'real' unknown NMI. These two NMIs get combined into
* one (as descibed above). When the next NMI gets
* one (as described above). When the next NMI gets
* processed, it will be flagged by perf as handled, but
* noone will know that there was a 'real' unknown NMI sent
* no one will know that there was a 'real' unknown NMI sent
* also. As a result it gets swallowed. Or if the first
* perf NMI returns two events handled then the second
* NMI will get eaten by the logic below, again losing a
......
@@ -531,7 +531,7 @@ static void emergency_vmx_disable_all(void)
/*
* We need to disable VMX on all CPUs before rebooting, otherwise
* we risk hanging up the machine, because the CPU ignore INIT
* we risk hanging up the machine, because the CPU ignores INIT
* signals when VMX is enabled.
*
* We can't take any locks and we may be on an inconsistent
......
@@ -1438,7 +1438,7 @@ early_param("possible_cpus", _setup_possible_cpus);
/*
* cpu_possible_mask should be static, it cannot change as cpu's
* are onlined, or offlined. The reason is per-cpu data-structures
* are allocated by some modules at init time, and dont expect to
* are allocated by some modules at init time, and don't expect to
* do this dynamically on cpu arrival/departure.
* cpu_present_mask on the other hand can change dynamically.
* In case when cpu_hotplug is not compiled, then we resort to current
......
@@ -96,7 +96,8 @@ struct stack_frame_user {
};
static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
copy_stack_frame(const struct stack_frame_user __user *fp,
struct stack_frame_user *frame)
{
int ret;
@@ -105,7 +106,8 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
ret = 1;
pagefault_disable();
if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
if (__get_user(frame->next_fp, &fp->next_fp) ||
__get_user(frame->ret_addr, &fp->ret_addr))
ret = 0;
pagefault_enable();
......
@@ -62,19 +62,16 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
static struct irqaction irq0 = {
.handler = timer_interrupt,
.flags = IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
.name = "timer"
};
static void __init setup_default_timer_irq(void)
{
unsigned long flags = IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER;
/*
* Unconditionally register the legacy timer; even without legacy
* PIC/PIT we need this for the HPET0 in legacy replacement mode.
* Unconditionally register the legacy timer interrupt; even
* without legacy PIC/PIT we need this for the HPET0 in legacy
* replacement mode.
*/
if (setup_irq(0, &irq0))
if (request_irq(0, timer_interrupt, flags, "timer", NULL))
pr_info("Failed to register legacy timer interrupt\n");
}
......
@@ -477,7 +477,7 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
* transition from one expected value to another with a fairly
* high accuracy, and we didn't miss any events. We can thus
* use the TSC value at the transitions to calculate a pretty
* good value for the TSC frequencty.
* good value for the TSC frequency.
*/
static inline int pit_verify_msb(unsigned char val)
{
......
@@ -295,7 +295,7 @@ static cycles_t check_tsc_warp(unsigned int timeout)
* But as the TSC is per-logical CPU and can potentially be modified wrongly
* by the bios, TSC sync test for smaller duration should be able
* to catch such errors. Also this will catch the condition where all the
* cores in the socket doesn't get reset at the same time.
* cores in the socket don't get reset at the same time.
*/
static inline unsigned int loop_timeout(int cpu)
{
......
@@ -98,7 +98,6 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
struct task_struct *tsk = current;
struct vm86plus_struct __user *user;
struct vm86 *vm86 = current->thread.vm86;
long err = 0;
/*
* This gets called from entry.S with interrupts disabled, but
@@ -114,37 +113,30 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
user = vm86->user_vm86;
if (!access_ok(user, vm86->vm86plus.is_vm86pus ?
if (!user_access_begin(user, vm86->vm86plus.is_vm86pus ?
sizeof(struct vm86plus_struct) :
sizeof(struct vm86_struct))) {
pr_alert("could not access userspace vm86 info\n");
do_exit(SIGSEGV);
}
put_user_try {
put_user_ex(regs->pt.bx, &user->regs.ebx);
put_user_ex(regs->pt.cx, &user->regs.ecx);
put_user_ex(regs->pt.dx, &user->regs.edx);
put_user_ex(regs->pt.si, &user->regs.esi);
put_user_ex(regs->pt.di, &user->regs.edi);
put_user_ex(regs->pt.bp, &user->regs.ebp);
put_user_ex(regs->pt.ax, &user->regs.eax);
put_user_ex(regs->pt.ip, &user->regs.eip);
put_user_ex(regs->pt.cs, &user->regs.cs);
put_user_ex(regs->pt.flags, &user->regs.eflags);
put_user_ex(regs->pt.sp, &user->regs.esp);
put_user_ex(regs->pt.ss, &user->regs.ss);
put_user_ex(regs->es, &user->regs.es);
put_user_ex(regs->ds, &user->regs.ds);
put_user_ex(regs->fs, &user->regs.fs);
put_user_ex(regs->gs, &user->regs.gs);
put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
} put_user_catch(err);
if (err) {
pr_alert("could not access userspace vm86 info\n");
do_exit(SIGSEGV);
}
sizeof(struct vm86_struct)))
goto Efault;
unsafe_put_user(regs->pt.bx, &user->regs.ebx, Efault_end);
unsafe_put_user(regs->pt.cx, &user->regs.ecx, Efault_end);
unsafe_put_user(regs->pt.dx, &user->regs.edx, Efault_end);
unsafe_put_user(regs->pt.si, &user->regs.esi, Efault_end);
unsafe_put_user(regs->pt.di, &user->regs.edi, Efault_end);
unsafe_put_user(regs->pt.bp, &user->regs.ebp, Efault_end);
unsafe_put_user(regs->pt.ax, &user->regs.eax, Efault_end);
unsafe_put_user(regs->pt.ip, &user->regs.eip, Efault_end);
unsafe_put_user(regs->pt.cs, &user->regs.cs, Efault_end);
unsafe_put_user(regs->pt.flags, &user->regs.eflags, Efault_end);
unsafe_put_user(regs->pt.sp, &user->regs.esp, Efault_end);
unsafe_put_user(regs->pt.ss, &user->regs.ss, Efault_end);
unsafe_put_user(regs->es, &user->regs.es, Efault_end);
unsafe_put_user(regs->ds, &user->regs.ds, Efault_end);
unsafe_put_user(regs->fs, &user->regs.fs, Efault_end);
unsafe_put_user(regs->gs, &user->regs.gs, Efault_end);
unsafe_put_user(vm86->screen_bitmap, &user->screen_bitmap, Efault_end);
user_access_end();
preempt_disable();
tsk->thread.sp0 = vm86->saved_sp0;
@@ -159,6 +151,13 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
lazy_load_gs(vm86->regs32.gs);
regs->pt.ax = retval;
return;
Efault_end:
user_access_end();
Efault:
pr_alert("could not access userspace vm86 info\n");
do_exit(SIGSEGV);
}
static void mark_screen_rdonly(struct mm_struct *mm)
@@ -243,6 +242,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
struct kernel_vm86_regs vm86regs;
struct pt_regs *regs = current_pt_regs();
unsigned long err = 0;
struct vm86_struct v;
err = security_mmap_addr(0);
if (err) {
@@ -278,39 +278,32 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
if (vm86->saved_sp0)
return -EPERM;
if (!access_ok(user_vm86, plus ?
sizeof(struct vm86_struct) :
sizeof(struct vm86plus_struct)))
if (copy_from_user(&v, user_vm86,
offsetof(struct vm86_struct, int_revectored)))
return -EFAULT;
memset(&vm86regs, 0, sizeof(vm86regs));
get_user_try {
unsigned short seg;
get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
get_user_ex(seg, &user_vm86->regs.cs);
vm86regs.pt.cs = seg;
get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
get_user_ex(seg, &user_vm86->regs.ss);
vm86regs.pt.ss = seg;
get_user_ex(vm86regs.es, &user_vm86->regs.es);
get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
get_user_ex(vm86regs.gs, &user_vm86->regs.gs);
get_user_ex(vm86->flags, &user_vm86->flags);
get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
} get_user_catch(err);
if (err)
return err;
vm86regs.pt.bx = v.regs.ebx;
vm86regs.pt.cx = v.regs.ecx;
vm86regs.pt.dx = v.regs.edx;
vm86regs.pt.si = v.regs.esi;
vm86regs.pt.di = v.regs.edi;
vm86regs.pt.bp = v.regs.ebp;
vm86regs.pt.ax = v.regs.eax;
vm86regs.pt.ip = v.regs.eip;
vm86regs.pt.cs = v.regs.cs;
vm86regs.pt.flags = v.regs.eflags;
vm86regs.pt.sp = v.regs.esp;
vm86regs.pt.ss = v.regs.ss;
vm86regs.es = v.regs.es;
vm86regs.ds = v.regs.ds;
vm86regs.fs = v.regs.fs;
vm86regs.gs = v.regs.gs;
vm86->flags = v.flags;
vm86->screen_bitmap = v.screen_bitmap;
vm86->cpu_type = v.cpu_type;
if (copy_from_user(&vm86->int_revectored,
&user_vm86->int_revectored,
......
@@ -400,7 +400,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
goto error;
ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
if (unlikely(__get_user(pte, ptep_user)))
goto error;
walker->ptep_user[walker->level - 1] = ptep_user;
......
@@ -42,6 +42,7 @@
#include <asm/mce.h>
#include <asm/mmu_context.h>
#include <asm/mshyperv.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/virtext.h>
#include <asm/vmx.h>
......
@@ -14,8 +14,6 @@
extern const u32 vmx_msr_index[];
extern u64 host_efer;
extern u32 get_umwait_control_msr(void);
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
#define MSR_TYPE_RW 3
......
@@ -80,18 +80,6 @@ __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_uaccess);
__visible bool ex_handler_ext(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
unsigned long fault_addr)
{
/* Special hack for uaccess_err */
current->thread.uaccess_err = 1;
regs->ip = ex_fixup_addr(fixup);
return true;
}
EXPORT_SYMBOL(ex_handler_ext);
__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
......
@@ -324,7 +324,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
0, NULL, NUMA_NO_NODE);
}
int __init setup_emu2phys_nid(int *dfl_phys_nid)
static int __init setup_emu2phys_nid(int *dfl_phys_nid)
{
int i, max_emu_nid = 0;
......
@@ -15,6 +15,7 @@
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/libnvdimm.h>
#include <asm/e820/api.h>
#include <asm/processor.h>
@@ -304,11 +305,13 @@ void clflush_cache_range(void *vaddr, unsigned int size)
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_invalidate_pmem(void *addr, size_t size)
{
clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
static void __cpa_flush_all(void *arg)
{
......
@@ -39,6 +39,7 @@
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
#undef pr_fmt
#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
@@ -554,13 +555,6 @@ static inline bool pti_kernel_image_global_ok(void)
return true;
}
/*
* This is the only user for these and it is not arch-generic
* like the other set_memory.h functions. Just extern them.
*/
extern int set_memory_nonglobal(unsigned long addr, int numpages);
extern int set_memory_global(unsigned long addr, int numpages);
/*
* For some configurations, map all of kernel text into the user page
* tables. This reduces TLB misses, especially on non-PCID systems.
......
@@ -454,12 +454,13 @@ extern void __user *compat_alloc_user_space(unsigned long len);
int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
#define compat_save_altstack_ex(uss, sp) do { \
#define unsafe_compat_save_altstack(uss, sp, label) do { \
compat_stack_t __user *__uss = uss; \
struct task_struct *t = current; \
put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
put_user_ex(t->sas_ss_size, &__uss->ss_size); \
unsafe_put_user(ptr_to_compat((void __user *)t->sas_ss_sp), \
&__uss->ss_sp, label); \
unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
if (t->sas_ss_flags & SS_AUTODISARM) \
sas_ss_reset(t); \
} while (0);
......
@@ -1243,4 +1243,6 @@ struct linux_efi_memreserve {
#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
/ sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
#endif /* _LINUX_EFI_H */
@@ -444,12 +444,12 @@ void signals_init(void);
int restore_altstack(const stack_t __user *);
int __save_altstack(stack_t __user *, unsigned long);
#define save_altstack_ex(uss, sp) do { \
#define unsafe_save_altstack(uss, sp, label) do { \
stack_t __user *__uss = uss; \
struct task_struct *t = current; \
put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
put_user_ex(t->sas_ss_size, &__uss->ss_size); \
unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \
unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
if (t->sas_ss_flags & SS_AUTODISARM) \
sas_ss_reset(t); \
} while (0);
......
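Like unsafe_put_user(), the new unsafe_save_altstack()/unsafe_compat_save_altstack() helpers may only be used between user_access_begin() and user_access_end(), with a label to jump to on a fault. A rough usage sketch; the frame type and its fields here are illustrative placeholders, not the real x86 sigframe layout:

static int setup_frame_sketch(struct rt_sigframe_ish __user *frame, int sig, struct pt_regs *regs)
{
	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;
	unsafe_put_user(sig, &frame->sig, Efault);			/* illustrative field */
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);	/* illustrative field */
	user_access_end();
	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}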