Commit 1fe3f29e authored by Ingo Molnar

Merge branches 'x86/fpu', 'x86/mm' and 'x86/asm' into x86/pkeys

Provide a stable basis for the pkeys patches, which touch various
x86 details.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
...@@ -666,7 +666,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
clearcpuid=BITNUM [X86]
Disable CPUID feature X for the kernel. See
- arch/x86/include/asm/cpufeature.h for the valid bit
+ arch/x86/include/asm/cpufeatures.h for the valid bit
numbers. Note the Linux specific bits are not necessarily
stable over kernel options, but the vendor specific
ones should be.
...@@ -2566,6 +2566,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
nointroute [IA-64]
noinvpcid [X86] Disable the INVPCID cpu feature.
nojitter [IA-64] Disables jitter checking for ITC timers.
no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver
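(Purely as an illustration, not part of the patch: both options are passed on the kernel command line at boot, e.g. "clearcpuid=<bit-number-from-cpufeatures.h> noinvpcid" to disable a chosen CPUID feature and the INVPCID instruction respectively.)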
......
...@@ -350,16 +350,6 @@ config DEBUG_IMR_SELFTEST
If unsure say N here.
config X86_DEBUG_STATIC_CPU_HAS
bool "Debug alternatives"
depends on DEBUG_KERNEL
---help---
This option causes additional code to be generated which
fails if static_cpu_has() is used before alternatives have
run.
If unsure, say N.
config X86_DEBUG_FPU
bool "Debug the x86 FPU code"
depends on DEBUG_KERNEL
......
#ifndef BOOT_CPUFLAGS_H
#define BOOT_CPUFLAGS_H
- #include <asm/cpufeature.h>
+ #include <asm/cpufeatures.h>
#include <asm/processor-flags.h>
struct cpu_features {
......
...@@ -17,7 +17,7 @@
#include "../include/asm/required-features.h"
#include "../include/asm/disabled-features.h"
- #include "../include/asm/cpufeature.h"
+ #include "../include/asm/cpufeatures.h"
#include "../kernel/cpu/capflags.c"
int main(void)
......
...@@ -33,7 +33,7 @@
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
- #include <asm/cpufeature.h>
+ #include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
......
...@@ -30,7 +30,7 @@
#include <linux/kernel.h>
#include <crypto/internal/hash.h>
- #include <asm/cpufeature.h>
+ #include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/internal.h>
......
...@@ -30,7 +30,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/fpu/api.h>
- #include <asm/cpufeature.h>
+ #include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
......
...@@ -201,37 +201,6 @@ For 32-bit we have the following conventions - kernel is built with
.byte 0xf1
.endm
#else /* CONFIG_X86_64 */
/*
* For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These
* are different from the entry_32.S versions in not changing the segment
* registers. So only suitable for in kernel use, not when transitioning
* from or to user space. The resulting stack frame is not a standard
* pt_regs frame. The main use case is calling C code from assembler
* when all the registers need to be preserved.
*/
.macro SAVE_ALL
pushl %eax
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
.endm
.macro RESTORE_ALL
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %ebp
popl %eax
.endm
#endif /* CONFIG_X86_64 */
/*
......
...@@ -26,6 +26,7 @@
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
...@@ -344,6 +345,32 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
prepare_exit_to_usermode(regs);
}
#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
struct thread_info *ti = pt_regs_to_thread_info(regs);
unsigned long nr = regs->orig_ax;
local_irq_enable();
if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
nr = syscall_trace_enter(regs);
/*
* NB: Native and x32 syscalls are dispatched from the same
* table. The only functional difference is the x32 bit in
* regs->orig_ax, which changes the behavior of some syscalls.
*/
if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
regs->ax = sys_call_table[nr & __SYSCALL_MASK](
regs->di, regs->si, regs->dx,
regs->r10, regs->r8, regs->r9);
}
syscall_return_slowpath(regs);
}
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
* Does a 32-bit syscall. Called with IRQs on and does all entry and
......
...@@ -40,7 +40,7 @@
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
- #include <asm/cpufeature.h>
+ #include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
......
This diff is collapsed.
...@@ -6,17 +6,11 @@
#include <asm/asm-offsets.h>
#include <asm/syscall.h>
- #ifdef CONFIG_IA32_EMULATION
- #define SYM(sym, compat) compat
- #else
- #define SYM(sym, compat) sym
- #endif
- #define __SYSCALL_I386(nr, sym, compat) extern asmlinkage long SYM(sym, compat)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+ #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
#include <asm/syscalls_32.h>
#undef __SYSCALL_I386
- #define __SYSCALL_I386(nr, sym, compat) [nr] = SYM(sym, compat),
+ #define __SYSCALL_I386(nr, sym, qual) [nr] = sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
......
...@@ -6,19 +6,14 @@
#include <asm/asm-offsets.h>
#include <asm/syscall.h>
- #define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
- #ifdef CONFIG_X86_X32_ABI
- # define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
- #else
- # define __SYSCALL_X32(nr, sym, compat) /* nothing */
- #endif
- #define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+ #define __SYSCALL_64_QUAL_(sym) sym
+ #define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym
+ #define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_64.h>
#undef __SYSCALL_64
- #define __SYSCALL_64(nr, sym, compat) [nr] = sym,
+ #define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym),
extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
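(Illustrative expansion, reconstructed from the macros above rather than shown in the patch: a table entry carrying the ptregs qualifier, such as syscall 59 / execve further down in this commit, expands in two passes to

    extern asmlinkage long ptregs_sys_execve(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
    [59] = ptregs_sys_execve,

while an unqualified entry keeps its plain sys_* symbol via __SYSCALL_64_QUAL_().)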
......
...@@ -21,7 +21,7 @@
12 common brk sys_brk
13 64 rt_sigaction sys_rt_sigaction
14 common rt_sigprocmask sys_rt_sigprocmask
- 15 64 rt_sigreturn stub_rt_sigreturn
+ 15 64 rt_sigreturn sys_rt_sigreturn/ptregs
16 64 ioctl sys_ioctl
17 common pread64 sys_pread64
18 common pwrite64 sys_pwrite64
...@@ -62,10 +62,10 @@
53 common socketpair sys_socketpair
54 64 setsockopt sys_setsockopt
55 64 getsockopt sys_getsockopt
- 56 common clone stub_clone
- 57 common fork stub_fork
- 58 common vfork stub_vfork
- 59 64 execve stub_execve
+ 56 common clone sys_clone/ptregs
+ 57 common fork sys_fork/ptregs
+ 58 common vfork sys_vfork/ptregs
+ 59 64 execve sys_execve/ptregs
60 common exit sys_exit
61 common wait4 sys_wait4
62 common kill sys_kill
...@@ -178,7 +178,7 @@
169 common reboot sys_reboot
170 common sethostname sys_sethostname
171 common setdomainname sys_setdomainname
- 172 common iopl sys_iopl
+ 172 common iopl sys_iopl/ptregs
173 common ioperm sys_ioperm
174 64 create_module
175 common init_module sys_init_module
...@@ -328,7 +328,7 @@
319 common memfd_create sys_memfd_create
320 common kexec_file_load sys_kexec_file_load
321 common bpf sys_bpf
- 322 64 execveat stub_execveat
+ 322 64 execveat sys_execveat/ptregs
323 common userfaultfd sys_userfaultfd
324 common membarrier sys_membarrier
325 common mlock2 sys_mlock2
...@@ -339,14 +339,14 @@
# for native 64-bit operation.
#
512 x32 rt_sigaction compat_sys_rt_sigaction
- 513 x32 rt_sigreturn stub_x32_rt_sigreturn
+ 513 x32 rt_sigreturn sys32_x32_rt_sigreturn
514 x32 ioctl compat_sys_ioctl
515 x32 readv compat_sys_readv
516 x32 writev compat_sys_writev
517 x32 recvfrom compat_sys_recvfrom
518 x32 sendmsg compat_sys_sendmsg
519 x32 recvmsg compat_sys_recvmsg
- 520 x32 execve stub_x32_execve
+ 520 x32 execve compat_sys_execve/ptregs
521 x32 ptrace compat_sys_ptrace
522 x32 rt_sigpending compat_sys_rt_sigpending
523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait
...@@ -371,4 +371,4 @@
542 x32 getsockopt compat_sys_getsockopt
543 x32 io_setup compat_sys_io_setup
544 x32 io_submit compat_sys_io_submit
- 545 x32 execveat stub_x32_execveat
+ 545 x32 execveat compat_sys_execveat/ptregs
...@@ -3,13 +3,63 @@
in="$1"
out="$2"
syscall_macro() {
abi="$1"
nr="$2"
entry="$3"
# Entry can be either just a function name or "function/qualifier"
real_entry="${entry%%/*}"
qualifier="${entry:${#real_entry}}" # Strip the function name
qualifier="${qualifier:1}" # Strip the slash, if any
echo "__SYSCALL_${abi}($nr, $real_entry, $qualifier)"
}
emit() {
abi="$1"
nr="$2"
entry="$3"
compat="$4"
if [ "$abi" == "64" -a -n "$compat" ]; then
echo "a compat entry for a 64-bit syscall makes no sense" >&2
exit 1
fi
if [ -z "$compat" ]; then
if [ -n "$entry" ]; then
syscall_macro "$abi" "$nr" "$entry"
fi
else
echo "#ifdef CONFIG_X86_32"
if [ -n "$entry" ]; then
syscall_macro "$abi" "$nr" "$entry"
fi
echo "#else"
syscall_macro "$abi" "$nr" "$compat"
echo "#endif"
fi
}
grep '^[0-9]' "$in" | sort -n | (
while read nr abi name entry compat; do
abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
- if [ -n "$compat" ]; then
- echo "__SYSCALL_${abi}($nr, $entry, $compat)"
- elif [ -n "$entry" ]; then
- echo "__SYSCALL_${abi}($nr, $entry, $entry)"
+ if [ "$abi" == "COMMON" -o "$abi" == "64" ]; then
+ # COMMON is the same as 64, except that we don't expect X32
+ # programs to use it. Our expectation has nothing to do with
+ # any generated code, so treat them the same.
+ emit 64 "$nr" "$entry" "$compat"
+ elif [ "$abi" == "X32" ]; then
+ # X32 is equivalent to 64 on an X32-compatible kernel.
+ echo "#ifdef CONFIG_X86_X32_ABI"
+ emit 64 "$nr" "$entry" "$compat"
+ echo "#endif"
+ elif [ "$abi" == "I386" ]; then
+ emit "$abi" "$nr" "$entry" "$compat"
+ else
+ echo "Unknown abi $abi" >&2
+ exit 1
fi
done
) > "$out"
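(Worked example, derived from the table and script above rather than copied from the diff: the table line "59 64 execve sys_execve/ptregs" is split by syscall_macro() into entry sys_execve and qualifier ptregs, so the script writes

    __SYSCALL_64(59, sys_execve, ptregs)

into the generated header, which the syscall_64.c macros shown earlier then expand into the ptregs_sys_execve declaration and table slot.)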
...@@ -150,16 +150,9 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
}
fprintf(outfile, "\n};\n\n");
fprintf(outfile, "static struct page *pages[%lu];\n\n",
mapping_size / 4096);
fprintf(outfile, "const struct vdso_image %s = {\n", name);
fprintf(outfile, "\t.data = raw_data,\n");
fprintf(outfile, "\t.size = %lu,\n", mapping_size);
fprintf(outfile, "\t.text_mapping = {\n");
fprintf(outfile, "\t\t.name = \"[vdso]\",\n");
fprintf(outfile, "\t\t.pages = pages,\n");
fprintf(outfile, "\t},\n");
if (alt_sec) {
fprintf(outfile, "\t.alt = %lu,\n",
(unsigned long)GET_LE(&alt_sec->sh_offset));
......
...@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/mm_types.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/vdso.h>
......
...@@ -3,7 +3,7 @@
*/
#include <asm/dwarf2.h>
- #include <asm/cpufeature.h>
+ #include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
/*
......
...@@ -20,6 +20,7 @@
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
...@@ -27,13 +28,7 @@ unsigned int __read_mostly vdso64_enabled = 1;
void __init init_vdso_image(const struct vdso_image *image)
{
int i;
int npages = (image->size) / PAGE_SIZE;
BUG_ON(image->size % PAGE_SIZE != 0);
for (i = 0; i < npages; i++)
image->text_mapping.pages[i] =
virt_to_page(image->data + i*PAGE_SIZE);
apply_alternatives((struct alt_instr *)(image->data + image->alt),
(struct alt_instr *)(image->data + image->alt +
...@@ -90,18 +85,87 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
#endif
}
static int vdso_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
const struct vdso_image *image = vma->vm_mm->context.vdso_image;
if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
return VM_FAULT_SIGBUS;
vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
get_page(vmf->page);
return 0;
}
static const struct vm_special_mapping text_mapping = {
.name = "[vdso]",
.fault = vdso_fault,
};
static int vvar_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
const struct vdso_image *image = vma->vm_mm->context.vdso_image;
long sym_offset;
int ret = -EFAULT;
if (!image)
return VM_FAULT_SIGBUS;
sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
image->sym_vvar_start;
/*
* Sanity check: a symbol offset of zero means that the page
* does not exist for this vdso image, not that the page is at
* offset zero relative to the text mapping. This should be
* impossible here, because sym_offset should only be zero for
* the page past the end of the vvar mapping.
*/
if (sym_offset == 0)
return VM_FAULT_SIGBUS;
if (sym_offset == image->sym_vvar_page) {
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
__pa_symbol(&__vvar_page) >> PAGE_SHIFT);
} else if (sym_offset == image->sym_hpet_page) {
#ifdef CONFIG_HPET_TIMER
if (hpet_address && vclock_was_used(VCLOCK_HPET)) {
ret = vm_insert_pfn_prot(
vma,
(unsigned long)vmf->virtual_address,
hpet_address >> PAGE_SHIFT,
pgprot_noncached(PAGE_READONLY));
}
#endif
} else if (sym_offset == image->sym_pvclock_page) {
struct pvclock_vsyscall_time_info *pvti =
pvclock_pvti_cpu0_va();
if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
ret = vm_insert_pfn(
vma,
(unsigned long)vmf->virtual_address,
__pa(pvti) >> PAGE_SHIFT);
}
}
if (ret == 0 || ret == -EBUSY)
return VM_FAULT_NOPAGE;
return VM_FAULT_SIGBUS;
}
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long addr, text_start;
int ret = 0;
- static struct page *no_pages[] = {NULL};
- static struct vm_special_mapping vvar_mapping = {
+ static const struct vm_special_mapping vvar_mapping = {
.name = "[vvar]",
- .pages = no_pages,
+ .fault = vvar_fault,
};
struct pvclock_vsyscall_time_info *pvti;
if (calculate_addr) {
addr = vdso_addr(current->mm->start_stack,
...@@ -121,6 +185,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
text_start = addr - image->sym_vvar_start;
current->mm->context.vdso = (void __user *)text_start;
current->mm->context.vdso_image = image;
/*
* MAYWRITE to allow gdb to COW and set breakpoints
...@@ -130,7 +195,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
image->size,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- &image->text_mapping);
+ &text_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
...@@ -140,7 +205,8 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
vma = _install_special_mapping(mm,
addr,
-image->sym_vvar_start,
- VM_READ|VM_MAYREAD,
+ VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
+ VM_PFNMAP,
&vvar_mapping);
if (IS_ERR(vma)) {
...@@ -148,41 +214,6 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
goto up_fail;
}
if (image->sym_vvar_page)
ret = remap_pfn_range(vma,
text_start + image->sym_vvar_page,
__pa_symbol(&__vvar_page) >> PAGE_SHIFT,
PAGE_SIZE,
PAGE_READONLY);
if (ret)
goto up_fail;
#ifdef CONFIG_HPET_TIMER
if (hpet_address && image->sym_hpet_page) {
ret = io_remap_pfn_range(vma,
text_start + image->sym_hpet_page,
hpet_address >> PAGE_SHIFT,
PAGE_SIZE,
pgprot_noncached(PAGE_READONLY));
if (ret)
goto up_fail;
}
#endif
pvti = pvclock_pvti_cpu0_va();
if (pvti && image->sym_pvclock_page) {
ret = remap_pfn_range(vma,
text_start + image->sym_pvclock_page,
__pa(pvti) >> PAGE_SHIFT,
PAGE_SIZE,
PAGE_READONLY);
if (ret)
goto up_fail;
}
up_fail:
if (ret)
current->mm->context.vdso = NULL;
...@@ -254,7 +285,7 @@ static void vgetcpu_cpu_init(void *arg)
#ifdef CONFIG_NUMA
node = cpu_to_node(cpu);
#endif
- if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
+ if (static_cpu_has(X86_FEATURE_RDTSCP))
write_rdtscp_aux((node << 12) | cpu);
/*
......
...@@ -16,6 +16,8 @@
#include <asm/vgtod.h>
#include <asm/vvar.h>
int vclocks_used __read_mostly;
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
void update_vsyscall_tz(void)
...@@ -26,12 +28,17 @@ void update_vsyscall_tz(void)
void update_vsyscall(struct timekeeper *tk)
{
int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;
/* Mark the new vclock used. */
BUILD_BUG_ON(VCLOCK_MAX >= 32);
WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock_mode));
gtod_write_begin(vdata);
/* copy vsyscall data */
- vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
+ vdata->vclock_mode = vclock_mode;
vdata->cycle_last = tk->tkr_mono.cycle_last;
vdata->mask = tk->tkr_mono.mask;
vdata->mult = tk->tkr_mono.mult;
......
...@@ -151,12 +151,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".popsection"
/*
* This must be included *after* the definition of ALTERNATIVE due to
* <asm/arch_hweight.h>
*/
#include <asm/cpufeature.h>
/*
* Alternative instructions for different CPU types or capabilities.
*
......
...@@ -6,7 +6,6 @@
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/apicdef.h>
#include <linux/atomic.h>
#include <asm/fixmap.h>
......
#ifndef _ASM_X86_HWEIGHT_H
#define _ASM_X86_HWEIGHT_H
#include <asm/cpufeatures.h>
#ifdef CONFIG_64BIT
/* popcnt %edi, %eax -- redundant REX prefix for alignment */
#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
......
...@@ -91,7 +91,7 @@ set_bit(long nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
- static inline void __set_bit(long nr, volatile unsigned long *addr)
+ static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
...@@ -128,13 +128,13 @@ clear_bit(long nr, volatile unsigned long *addr)
* clear_bit() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
- static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}
- static inline void __clear_bit(long nr, volatile unsigned long *addr)
+ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
...@@ -151,7 +151,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
- static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
...@@ -166,7 +166,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
- static inline void __change_bit(long nr, volatile unsigned long *addr)
+ static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
...@@ -180,7 +180,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
- static inline void change_bit(long nr, volatile unsigned long *addr)
+ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
...@@ -201,7 +201,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
- static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+ static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}
...@@ -228,7 +228,7 @@ test_and_set_bit_lock(long nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
- static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+ static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
...@@ -247,7 +247,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
- static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+ static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}
...@@ -268,7 +268,7 @@ static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
- static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
+ static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
...@@ -280,7 +280,7 @@ static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
}
/* WARNING: non atomic and it can be reordered! */
- static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+ static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
...@@ -300,7 +300,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
- static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+ static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}
...@@ -311,7 +311,7 @@ static __always_inline int constant_test_bit(long nr, const volatile unsigned lo
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
- static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
+ static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
int oldbit;
...@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
*
* Undefined if no bit exists, so code should check against 0 first.
*/
- static inline unsigned long __ffs(unsigned long word)
+ static __always_inline unsigned long __ffs(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
...@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
- static inline unsigned long ffz(unsigned long word)
+ static __always_inline unsigned long ffz(unsigned long word)
{
asm("rep; bsf %1,%0"
: "=r" (word)
...@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
- static inline unsigned long __fls(unsigned long word)
+ static __always_inline unsigned long __fls(unsigned long word)
{
asm("bsr %1,%0"
: "=r" (word)
...@@ -393,7 +393,7 @@ static inline unsigned long __fls(unsigned long word)
* set bit if value is nonzero. The first (least significant) bit
* is at position 1.
*/
- static inline int ffs(int x)
+ static __always_inline int ffs(int x)
{
int r;
...@@ -434,7 +434,7 @@ static inline int ffs(int x)
* set bit if value is nonzero. The last (most significant) bit is
* at position 32.
*/
- static inline int fls(int x)
+ static __always_inline int fls(int x)
{
int r;
......
...@@ -7,6 +7,7 @@
#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */
#define VCLOCK_HPET 2 /* vDSO should use vread_hpet. */
#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
#define VCLOCK_MAX 3
struct arch_clocksource_data {
int vclock_mode;
......
...@@ -2,6 +2,7 @@
#define ASM_X86_CMPXCHG_H
#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
/*
......
This diff is collapsed.
This diff is collapsed.
...@@ -15,7 +15,7 @@ static __always_inline __init void *dmi_alloc(unsigned len)
/* Use early IO mappings for DMI because it's initialized early */
#define dmi_early_remap early_ioremap
#define dmi_early_unmap early_iounmap
- #define dmi_remap ioremap
+ #define dmi_remap ioremap_cache
#define dmi_unmap iounmap
#endif /* _ASM_X86_DMI_H */
...@@ -138,7 +138,7 @@ extern void reserve_top_address(unsigned long reserve);
extern int fixmaps_set;
extern pte_t *kmap_pte;
- extern pgprot_t kmap_prot;
+ #define kmap_prot PAGE_KERNEL
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
......
...@@ -17,6 +17,7 @@
#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
/*
* High level FPU state handling functions:
...@@ -58,22 +59,22 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
*/
static __always_inline __pure bool use_eager_fpu(void)
{
- return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+ return static_cpu_has(X86_FEATURE_EAGER_FPU);
}
static __always_inline __pure bool use_xsaveopt(void)
{
- return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
+ return static_cpu_has(X86_FEATURE_XSAVEOPT);
}
static __always_inline __pure bool use_xsave(void)
{
- return static_cpu_has_safe(X86_FEATURE_XSAVE);
+ return static_cpu_has(X86_FEATURE_XSAVE);
}
static __always_inline __pure bool use_fxsr(void)
{
- return static_cpu_has_safe(X86_FEATURE_FXSR);
+ return static_cpu_has(X86_FEATURE_FXSR);
}
/*
...@@ -300,7 +301,7 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
WARN_ON(system_state != SYSTEM_BOOTING);
- if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+ if (static_cpu_has(X86_FEATURE_XSAVES))
XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
else
XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
...@@ -322,7 +323,7 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
WARN_ON(system_state != SYSTEM_BOOTING);
- if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+ if (static_cpu_has(X86_FEATURE_XSAVES))
XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
else
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
...@@ -460,7 +461,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
* pending. Clear the x87 state here by setting it to fixed values.
* "m" is a random variable that should be in L1.
*/
- if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
+ if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
asm volatile(
"fnclex\n\t"
"emms\n\t"
...@@ -589,7 +590,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
* If the task has used the math, pre-load the FPU on xsave processors
* or if the past 5 consecutive context-switches used math.
*/
- fpu.preload = new_fpu->fpstate_active &&
+ fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
+ new_fpu->fpstate_active &&
(use_eager_fpu() || new_fpu->counter > 5);
if (old_fpu->fpregs_active) {
......
- #ifdef __ASSEMBLY__
- #include <asm/asm.h>
- /* The annotation hides the frame from the unwinder and makes it look
-    like a ordinary ebp save/restore. This avoids some special cases for
-    frame pointer later */
- #ifdef CONFIG_FRAME_POINTER
- .macro FRAME
- __ASM_SIZE(push,) %__ASM_REG(bp)
- __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
- .endm
- .macro ENDFRAME
- __ASM_SIZE(pop,) %__ASM_REG(bp)
- .endm
- #else
- .macro FRAME
- .endm
- .macro ENDFRAME
- .endm
- #endif
- #endif /* __ASSEMBLY__ */
+ #ifndef _ASM_X86_FRAME_H
+ #define _ASM_X86_FRAME_H
+ #include <asm/asm.h>
+ /*
+  * These are stack frame creation macros. They should be used by every
+  * callable non-leaf asm function to make kernel stack traces more reliable.
+  */
+ #ifdef CONFIG_FRAME_POINTER
+ #ifdef __ASSEMBLY__
+ .macro FRAME_BEGIN
+ push %_ASM_BP
+ _ASM_MOV %_ASM_SP, %_ASM_BP
+ .endm
+ .macro FRAME_END
+ pop %_ASM_BP
+ .endm
+ #else /* !__ASSEMBLY__ */
+ #define FRAME_BEGIN \
+ "push %" _ASM_BP "\n" \
+ _ASM_MOV "%" _ASM_SP ", %" _ASM_BP "\n"
+ #define FRAME_END "pop %" _ASM_BP "\n"
+ #endif /* __ASSEMBLY__ */
+ #define FRAME_OFFSET __ASM_SEL(4, 8)
+ #else /* !CONFIG_FRAME_POINTER */
+ #define FRAME_BEGIN
+ #define FRAME_END
+ #define FRAME_OFFSET 0
+ #endif /* CONFIG_FRAME_POINTER */
+ #endif /* _ASM_X86_FRAME_H */
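(A minimal usage sketch, with hypothetical function names not taken from this commit, showing how an assembly routine would adopt the new macros:

    #include <asm/frame.h>

    ENTRY(my_asm_helper)
    	FRAME_BEGIN		# set up a bp frame so stack traces stay reliable
    	call	my_c_helper	# non-leaf: calls into C
    	FRAME_END		# tear the frame down before returning
    	ret
    ENDPROC(my_asm_helper)

With CONFIG_FRAME_POINTER disabled, FRAME_BEGIN and FRAME_END compile to nothing.)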
#ifndef _ASM_IRQ_WORK_H
#define _ASM_IRQ_WORK_H
- #include <asm/processor.h>
+ #include <asm/cpufeature.h>
static inline bool arch_irq_work_has_interrupt(void)
{
......
...@@ -19,7 +19,8 @@ typedef struct {
#endif
struct mutex lock;
- void __user *vdso;
+ void __user *vdso; /* vdso base address */
+ const struct vdso_image *vdso_image; /* vdso image in use */
atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
} mm_context_t;
......
...@@ -3,6 +3,8 @@
#include <linux/sched.h>
#include <asm/cpufeature.h>
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
#define MWAIT_SUBSTATE_SIZE 4
......
...@@ -13,7 +13,7 @@ struct vm86;
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
- #include <asm/cpufeature.h>
+ #include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
...@@ -24,7 +24,6 @@ struct vm86;
#include <asm/fpu/types.h>
#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
......
...@@ -15,7 +15,7 @@
#include <linux/stringify.h>
#include <asm/nops.h>
- #include <asm/cpufeature.h>
+ #include <asm/cpufeatures.h>
/* "Raw" instruction opcodes */
#define __ASM_CLAC .byte 0x0f,0x01,0xca
......
...@@ -16,7 +16,6 @@
#endif
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/cpufeature.h>
extern int smp_num_siblings;
extern unsigned int num_processors;
......
...@@ -49,7 +49,7 @@
*/
#ifndef __ASSEMBLY__
struct task_struct;
- #include <asm/processor.h>
+ #include <asm/cpufeature.h>
#include <linux/atomic.h>
struct thread_info {
......
...@@ -5,8 +5,57 @@
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
static inline void __invpcid(unsigned long pcid, unsigned long addr,
unsigned long type)
{
struct { u64 d[2]; } desc = { { pcid, addr } };
/*
* The memory clobber is because the whole point is to invalidate
* stale TLB entries and, especially if we're flushing global
* mappings, we don't want the compiler to reorder any subsequent
* memory accesses before the TLB flush.
*
* The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
* invpcid (%rcx), %rax in long mode.
*/
asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
: : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
#define INVPCID_TYPE_INDIV_ADDR 0
#define INVPCID_TYPE_SINGLE_CTXT 1
#define INVPCID_TYPE_ALL_INCL_GLOBAL 2
#define INVPCID_TYPE_ALL_NON_GLOBAL 3
/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
unsigned long addr)
{
__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}
/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}
/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}
/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
...@@ -104,6 +153,15 @@ static inline void __native_flush_tlb_global(void)
{
unsigned long flags;
if (static_cpu_has(X86_FEATURE_INVPCID)) {
/*
* Using INVPCID is considerably faster than a pair of writes
* to CR4 sandwiched inside an IRQ flag save/restore.
*/
invpcid_flush_all();
return;
}
/*
* Read-modify-write to CR4 - protect it from preemption and
* from interrupts. (Use the raw variant because this code can
......
...@@ -8,7 +8,7 @@
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
- #include <asm/cpufeature.h>
+ #include <asm/cpufeatures.h>
#include <asm/page.h>
/*
......
...@@ -13,9 +13,6 @@ struct vdso_image {
void *data;
unsigned long size; /* Always a multiple of PAGE_SIZE */
/* text_mapping.pages is big enough for data/size page pointers */
struct vm_special_mapping text_mapping;
unsigned long alt, alt_len;
long sym_vvar_start; /* Negative offset to the vvar area */
......
...@@ -37,6 +37,12 @@ struct vsyscall_gtod_data {
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;
extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
return READ_ONCE(vclocks_used) & (1 << vclock);
}
static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
unsigned ret;
......
...@@ -30,7 +30,7 @@ static unsigned int numachip1_get_apic_id(unsigned long x)
unsigned long value;
unsigned int id = (x >> 24) & 0xff;
- if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+ if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
rdmsrl(MSR_FAM10H_NODE_ID, value);
id |= (value << 2) & 0xff00;
}
...@@ -178,7 +178,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
this_cpu_write(cpu_llc_id, node);
/* Account for nodes per socket in multi-core-module processors */
- if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+ if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
rdmsrl(MSR_FAM10H_NODE_ID, val);
nodes = ((val >> 3) & 7) + 1;
}
......
...@@ -7,7 +7,7 @@
#include <linux/lguest.h>
#include "../../../drivers/lguest/lg.h"
- #define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+ #define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
static char syscalls[] = {
#include <asm/syscalls_32.h>
};
......
...@@ -4,17 +4,11 @@
#include <asm/ia32.h>
- #define __SYSCALL_64(nr, sym, compat) [nr] = 1,
- #define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
- #ifdef CONFIG_X86_X32_ABI
- # define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
- #else
- # define __SYSCALL_X32(nr, sym, compat) /* nothing */
- #endif
+ #define __SYSCALL_64(nr, sym, qual) [nr] = 1,
static char syscalls_64[] = {
#include <asm/syscalls_64.h>
};
- #define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+ #define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
static char syscalls_ia32[] = {
#include <asm/syscalls_32.h>
};
......
...@@ -64,7 +64,7 @@ ifdef CONFIG_X86_FEATURE_NAMES
quiet_cmd_mkcapflags = MKCAP $@
cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
- cpufeature = $(src)/../../include/asm/cpufeature.h
+ cpufeature = $(src)/../../include/asm/cpufeatures.h
targets += capflags.c
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
......
#include <linux/bitops.h>
#include <linux/kernel.h>
- #include <asm/processor.h>
+ #include <asm/cpufeature.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
......
...@@ -162,6 +162,22 @@ static int __init x86_mpx_setup(char *s)
}
__setup("nompx", x86_mpx_setup);
static int __init x86_noinvpcid_setup(char *s)
{
/* noinvpcid doesn't accept parameters */
if (s)
return -EINVAL;
/* do not emit a message if the feature is not present */
if (!boot_cpu_has(X86_FEATURE_INVPCID))
return 0;
setup_clear_cpu_cap(X86_FEATURE_INVPCID);
pr_info("noinvpcid: INVPCID feature disabled\n");
return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);
#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;
...@@ -1475,20 +1491,6 @@ void cpu_init(void)
}
#endif
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
void warn_pre_alternatives(void)
{
WARN(1, "You're using static_cpu_has before alternatives have run!\n");
}
EXPORT_SYMBOL_GPL(warn_pre_alternatives);
#endif
inline bool __static_cpu_has_safe(u16 bit)
{
return boot_cpu_has(bit);
}
EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
static void bsp_resume(void)
{
if (this_cpu->c_bsp_resume)
......
...@@ -8,6 +8,7 @@
#include <linux/timer.h>
#include <asm/pci-direct.h>
#include <asm/tsc.h>
#include <asm/cpufeature.h>
#include "cpu.h"
......
...@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/uaccess.h>
- #include <asm/processor.h>
+ #include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
......
...@@ -14,7 +14,7 @@
#include <linux/sysfs.h>
#include <linux/pci.h>
- #include <asm/processor.h>
+ #include <asm/cpufeature.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>
......
#include <asm/cpu_device_id.h>
- #include <asm/processor.h>
+ #include <asm/cpufeature.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
......
#!/bin/sh
#
- # Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeature.h
+ # Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
#
IN=$1
...@@ -49,8 +49,8 @@ dump_array()
trap 'rm "$OUT"' EXIT
(
- echo "#ifndef _ASM_X86_CPUFEATURE_H"
+ echo "#ifndef _ASM_X86_CPUFEATURES_H"
- echo "#include <asm/cpufeature.h>"
+ echo "#include <asm/cpufeatures.h>"
echo "#endif"
echo ""
......
...@@ -47,7 +47,7 @@
#include <linux/smp.h>
#include <linux/syscore_ops.h>
- #include <asm/processor.h>
+ #include <asm/cpufeature.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
......
#include <linux/kernel.h>
#include <linux/mm.h>
- #include <asm/processor.h>
+ #include <asm/cpufeature.h>
#include <asm/msr.h>
#include "cpu.h"
......
...@@ -24,6 +24,7 @@
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/cpufeature.h>
/*
* The e820 map is the map that gets modified e.g. with command line parameters
......
...@@ -114,6 +114,10 @@ void __kernel_fpu_begin(void)
kernel_fpu_disable();
if (fpu->fpregs_active) {
/*
* Ignore return value -- we don't care if reg state
* is clobbered.
*/
copy_fpregs_to_fpstate(fpu);
} else {
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
...@@ -189,9 +193,13 @@ void fpu__save(struct fpu *fpu)
preempt_disable();
if (fpu->fpregs_active) {
- if (!copy_fpregs_to_fpstate(fpu))
+ if (!copy_fpregs_to_fpstate(fpu)) {
+ if (use_eager_fpu())
+ copy_kernel_to_fpregs(&fpu->state);
+ else
fpregs_deactivate(fpu);
+ }
}
preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
...@@ -223,14 +231,15 @@ void fpstate_init(union fpregs_state *state)
}
EXPORT_SYMBOL_GPL(fpstate_init);
/* int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
* Copy the current task's FPU state to a new task's FPU context.
*
* In both the 'eager' and the 'lazy' case we save hardware registers
* directly to the destination buffer.
*/
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{ {
dst_fpu->counter = 0;
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
if (!src_fpu->fpstate_active || !cpu_has_fpu)
return 0;
WARN_ON_FPU(src_fpu != &current->thread.fpu); WARN_ON_FPU(src_fpu != &current->thread.fpu);
/* /*
...@@ -243,10 +252,9 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu) ...@@ -243,10 +252,9 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
/* /*
* Save current FPU registers directly into the child * Save current FPU registers directly into the child
* FPU context, without any memory-to-memory copying. * FPU context, without any memory-to-memory copying.
* * In lazy mode, if the FPU context isn't loaded into
* If the FPU context got destroyed in the process (FNSAVE * fpregs, CR0.TS will be set and do_device_not_available
* done on old CPUs) then copy it back into the source * will load the FPU context.
* context and mark the current task for lazy restore.
* *
* We have to do all this with preemption disabled, * We have to do all this with preemption disabled,
* mostly because of the FNSAVE case, because in that * mostly because of the FNSAVE case, because in that
...@@ -259,19 +267,13 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu) ...@@ -259,19 +267,13 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
preempt_disable(); preempt_disable();
if (!copy_fpregs_to_fpstate(dst_fpu)) { if (!copy_fpregs_to_fpstate(dst_fpu)) {
memcpy(&src_fpu->state, &dst_fpu->state, xstate_size); memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
if (use_eager_fpu())
copy_kernel_to_fpregs(&src_fpu->state);
else
fpregs_deactivate(src_fpu); fpregs_deactivate(src_fpu);
} }
preempt_enable(); preempt_enable();
}
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
dst_fpu->counter = 0;
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
if (src_fpu->fpstate_active && cpu_has_fpu)
fpu_copy(dst_fpu, src_fpu);
return 0; return 0;
} }
...@@ -423,7 +425,7 @@ void fpu__clear(struct fpu *fpu) ...@@ -423,7 +425,7 @@ void fpu__clear(struct fpu *fpu)
{ {
WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */ WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
if (!use_eager_fpu()) { if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
/* FPU state will be reallocated lazily at the first use. */ /* FPU state will be reallocated lazily at the first use. */
fpu__drop(fpu); fpu__drop(fpu);
} else { } else {
......
...@@ -260,7 +260,10 @@ static void __init fpu__init_system_xstate_size_legacy(void) ...@@ -260,7 +260,10 @@ static void __init fpu__init_system_xstate_size_legacy(void)
* not only saved the restores along the way, but we also have the * not only saved the restores along the way, but we also have the
* FPU ready to be used for the original task. * FPU ready to be used for the original task.
* *
* 'eager' switching is used on modern CPUs, there we switch the FPU * 'lazy' is deprecated because it's almost never a performance win
* and it's much more complicated than 'eager'.
*
* 'eager' switching is by default on all CPUs, there we switch the FPU
* state during every context switch, regardless of whether the task * state during every context switch, regardless of whether the task
* has used FPU instructions in that time slice or not. This is done * has used FPU instructions in that time slice or not. This is done
* because modern FPU context saving instructions are able to optimize * because modern FPU context saving instructions are able to optimize
...@@ -271,7 +274,7 @@ static void __init fpu__init_system_xstate_size_legacy(void) ...@@ -271,7 +274,7 @@ static void __init fpu__init_system_xstate_size_legacy(void)
* to use 'eager' restores, if we detect that a task is using the FPU * to use 'eager' restores, if we detect that a task is using the FPU
* frequently. See the fpu->counter logic in fpu/internal.h for that. ] * frequently. See the fpu->counter logic in fpu/internal.h for that. ]
*/ */
static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO; static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
/* /*
* Find supported xfeatures based on cpu features and command-line input. * Find supported xfeatures based on cpu features and command-line input.
...@@ -348,15 +351,9 @@ static void __init fpu__init_system_ctx_switch(void) ...@@ -348,15 +351,9 @@ static void __init fpu__init_system_ctx_switch(void)
*/ */
static void __init fpu__init_parse_early_param(void) static void __init fpu__init_parse_early_param(void)
{ {
/*
* No need to check "eagerfpu=auto" again, since it is the
* initial default.
*/
if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) { if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
eagerfpu = DISABLE; eagerfpu = DISABLE;
fpu__clear_eager_fpu_features(); fpu__clear_eager_fpu_features();
} else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on")) {
eagerfpu = ENABLE;
} }
if (cmdline_find_option_bool(boot_command_line, "no387")) if (cmdline_find_option_bool(boot_command_line, "no387"))
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/msr-index.h> #include <asm/msr-index.h>
#include <asm/cpufeature.h> #include <asm/cpufeatures.h>
#include <asm/percpu.h> #include <asm/percpu.h>
#include <asm/nops.h> #include <asm/nops.h>
#include <asm/bootparam.h> #include <asm/bootparam.h>
...@@ -389,6 +389,12 @@ default_entry: ...@@ -389,6 +389,12 @@ default_entry:
/* Make changes effective */ /* Make changes effective */
wrmsr wrmsr
/*
* And make sure that all the mappings we set up have NX set from
* the beginning.
*/
orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4)
enable_paging: enable_paging:
/* /*
......
...@@ -38,7 +38,6 @@ ...@@ -38,7 +38,6 @@
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) #define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET) L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map) L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map) L3_START_KERNEL = pud_index(__START_KERNEL_map)
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <linux/pm.h> #include <linux/pm.h>
#include <linux/io.h> #include <linux/io.h>
#include <asm/cpufeature.h>
#include <asm/irqdomain.h> #include <asm/irqdomain.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
#include <asm/hpet.h> #include <asm/hpet.h>
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <asm/processor.h> #include <asm/cpufeature.h>
#include <asm/msr.h> #include <asm/msr.h>
static struct class *msr_class; static struct class *msr_class;
......
...@@ -692,12 +692,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) ...@@ -692,12 +692,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{ {
#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64) #ifdef CONFIG_X86_64
if (is_ia32_task())
return __NR_ia32_restart_syscall;
#endif
#ifdef CONFIG_X86_X32_ABI
return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
#else
return __NR_restart_syscall; return __NR_restart_syscall;
#else /* !CONFIG_X86_32 && CONFIG_X86_64 */ #endif
return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
__NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
} }
/* /*
......
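With the rewrite above, a 64-bit kernel handles ia32 tasks first; for native 64-bit and x32 tasks the __X32_SYSCALL_BIT is simply copied from the original syscall number in orig_ax, so an x32 syscall gets restarted through the x32 entry point and everything else falls back to plain __NR_restart_syscall.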
...@@ -83,30 +83,16 @@ gate_desc idt_table[NR_VECTORS] __page_aligned_bss; ...@@ -83,30 +83,16 @@ gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
DECLARE_BITMAP(used_vectors, NR_VECTORS); DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors); EXPORT_SYMBOL_GPL(used_vectors);
static inline void conditional_sti(struct pt_regs *regs) static inline void cond_local_irq_enable(struct pt_regs *regs)
{ {
if (regs->flags & X86_EFLAGS_IF) if (regs->flags & X86_EFLAGS_IF)
local_irq_enable(); local_irq_enable();
} }
static inline void preempt_conditional_sti(struct pt_regs *regs) static inline void cond_local_irq_disable(struct pt_regs *regs)
{
preempt_count_inc();
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
static inline void conditional_cli(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
local_irq_disable();
}
static inline void preempt_conditional_cli(struct pt_regs *regs)
{ {
if (regs->flags & X86_EFLAGS_IF) if (regs->flags & X86_EFLAGS_IF)
local_irq_disable(); local_irq_disable();
preempt_count_dec();
} }
void ist_enter(struct pt_regs *regs) void ist_enter(struct pt_regs *regs)
...@@ -286,7 +272,7 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str, ...@@ -286,7 +272,7 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
NOTIFY_STOP) { NOTIFY_STOP) {
conditional_sti(regs); cond_local_irq_enable(regs);
do_trap(trapnr, signr, str, regs, error_code, do_trap(trapnr, signr, str, regs, error_code,
fill_trap_info(regs, signr, trapnr, &info)); fill_trap_info(regs, signr, trapnr, &info));
} }
...@@ -368,7 +354,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code) ...@@ -368,7 +354,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
if (notify_die(DIE_TRAP, "bounds", regs, error_code, if (notify_die(DIE_TRAP, "bounds", regs, error_code,
X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP) X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
return; return;
conditional_sti(regs); cond_local_irq_enable(regs);
if (!user_mode(regs)) if (!user_mode(regs))
die("bounds", regs, error_code); die("bounds", regs, error_code);
...@@ -443,7 +429,7 @@ do_general_protection(struct pt_regs *regs, long error_code) ...@@ -443,7 +429,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
struct task_struct *tsk; struct task_struct *tsk;
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
conditional_sti(regs); cond_local_irq_enable(regs);
if (v8086_mode(regs)) { if (v8086_mode(regs)) {
local_irq_enable(); local_irq_enable();
...@@ -517,9 +503,11 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) ...@@ -517,9 +503,11 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
* as we may switch to the interrupt stack. * as we may switch to the interrupt stack.
*/ */
debug_stack_usage_inc(); debug_stack_usage_inc();
preempt_conditional_sti(regs); preempt_disable();
cond_local_irq_enable(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
preempt_conditional_cli(regs); cond_local_irq_disable(regs);
preempt_enable_no_resched();
debug_stack_usage_dec(); debug_stack_usage_dec();
exit: exit:
ist_exit(regs); ist_exit(regs);
...@@ -648,12 +636,14 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) ...@@ -648,12 +636,14 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
debug_stack_usage_inc(); debug_stack_usage_inc();
/* It's safe to allow irq's after DR6 has been saved */ /* It's safe to allow irq's after DR6 has been saved */
preempt_conditional_sti(regs); preempt_disable();
cond_local_irq_enable(regs);
if (v8086_mode(regs)) { if (v8086_mode(regs)) {
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
X86_TRAP_DB); X86_TRAP_DB);
preempt_conditional_cli(regs); cond_local_irq_disable(regs);
preempt_enable_no_resched();
debug_stack_usage_dec(); debug_stack_usage_dec();
goto exit; goto exit;
} }
...@@ -673,7 +663,8 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code) ...@@ -673,7 +663,8 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
si_code = get_si_code(tsk->thread.debugreg6); si_code = get_si_code(tsk->thread.debugreg6);
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
send_sigtrap(tsk, regs, error_code, si_code); send_sigtrap(tsk, regs, error_code, si_code);
preempt_conditional_cli(regs); cond_local_irq_disable(regs);
preempt_enable_no_resched();
debug_stack_usage_dec(); debug_stack_usage_dec();
exit: exit:
...@@ -696,7 +687,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) ...@@ -696,7 +687,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
return; return;
conditional_sti(regs); cond_local_irq_enable(regs);
if (!user_mode(regs)) { if (!user_mode(regs)) {
if (!fixup_exception(regs)) { if (!fixup_exception(regs)) {
...@@ -743,20 +734,19 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code) ...@@ -743,20 +734,19 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
dotraplinkage void dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{ {
conditional_sti(regs); cond_local_irq_enable(regs);
} }
dotraplinkage void dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code) do_device_not_available(struct pt_regs *regs, long error_code)
{ {
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
BUG_ON(use_eager_fpu());
#ifdef CONFIG_MATH_EMULATION #ifdef CONFIG_MATH_EMULATION
if (read_cr0() & X86_CR0_EM) { if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
struct math_emu_info info = { }; struct math_emu_info info = { };
conditional_sti(regs); cond_local_irq_enable(regs);
info.regs = regs; info.regs = regs;
math_emulate(&info); math_emulate(&info);
...@@ -765,7 +755,7 @@ do_device_not_available(struct pt_regs *regs, long error_code) ...@@ -765,7 +755,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
#endif #endif
fpu__restore(&current->thread.fpu); /* interrupts still off */ fpu__restore(&current->thread.fpu); /* interrupts still off */
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
conditional_sti(regs); cond_local_irq_enable(regs);
#endif #endif
} }
NOKPROBE_SYMBOL(do_device_not_available); NOKPROBE_SYMBOL(do_device_not_available);
......
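A minimal sketch of the pattern the traps.c conversion settles on (hypothetical handler; only the cond_local_irq_*() helpers and the explicit preemption calls come from the hunks above):

static void example_ist_trap(struct pt_regs *regs, long error_code)
{
	preempt_disable();			/* replaces preempt_conditional_sti() */
	cond_local_irq_enable(regs);		/* enable IRQs only if the trapped context had them on */

	/* ... handle the exception ... */

	cond_local_irq_disable(regs);		/* restore the original IF state */
	preempt_enable_no_resched();		/* replaces preempt_conditional_cli() */
}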
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
* appropriately. Either display a message or halt. * appropriately. Either display a message or halt.
*/ */
#include <asm/cpufeature.h> #include <asm/cpufeatures.h>
#include <asm/msr-index.h> #include <asm/msr-index.h>
verify_cpu: verify_cpu:
......
...@@ -362,7 +362,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) ...@@ -362,7 +362,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
/* make room for real-mode segments */ /* make room for real-mode segments */
tsk->thread.sp0 += 16; tsk->thread.sp0 += 16;
if (static_cpu_has_safe(X86_FEATURE_SEP)) if (static_cpu_has(X86_FEATURE_SEP))
tsk->thread.sysenter_cs = 0; tsk->thread.sysenter_cs = 0;
load_sp0(tss, &tsk->thread); load_sp0(tss, &tsk->thread);
......
...@@ -195,6 +195,17 @@ SECTIONS ...@@ -195,6 +195,17 @@ SECTIONS
:init :init
#endif #endif
/*
* Section for code used exclusively before alternatives are run. All
* references to such code must be patched out by alternatives, normally
* by using X86_FEATURE_ALWAYS CPU feature bit.
*
* See static_cpu_has() for an example.
*/
.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
*(.altinstr_aux)
}
INIT_DATA_SECTION(16) INIT_DATA_SECTION(16)
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
......
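A minimal sketch of the kind of caller .altinstr_aux exists for (hypothetical fast path; only static_cpu_has() and the feature bit are real): before alternatives have run, the out-of-line test lives in .altinstr_aux, afterwards the check is patched into a direct branch:

static void copy_block(void *dst, const void *src, size_t len)
{
	if (static_cpu_has(X86_FEATURE_XMM2))
		sse2_copy(dst, src, len);	/* hypothetical SSE2 fast path */
	else
		memcpy(dst, src, len);		/* generic fallback */
}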
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cpufeature.h> #include <asm/cpufeatures.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
/* /*
......
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cpufeature.h> #include <asm/cpufeatures.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
/* /*
......
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
#include <asm/current.h> #include <asm/current.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/cpufeature.h> #include <asm/cpufeatures.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/smap.h> #include <asm/smap.h>
......
/* Copyright 2002 Andi Kleen */ /* Copyright 2002 Andi Kleen */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cpufeature.h> #include <asm/cpufeatures.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
/* /*
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
* - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cpufeature.h> #include <asm/cpufeatures.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
#undef memmove #undef memmove
......
/* Copyright 2002 Andi Kleen, SuSE Labs */ /* Copyright 2002 Andi Kleen, SuSE Labs */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cpufeature.h> #include <asm/cpufeatures.h>
#include <asm/alternative-asm.h> #include <asm/alternative-asm.h>
.weak memset .weak memset
......
...@@ -388,7 +388,6 @@ kernel_physical_mapping_init(unsigned long start, ...@@ -388,7 +388,6 @@ kernel_physical_mapping_init(unsigned long start,
} }
pte_t *kmap_pte; pte_t *kmap_pte;
pgprot_t kmap_prot;
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr) static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{ {
...@@ -405,8 +404,6 @@ static void __init kmap_init(void) ...@@ -405,8 +404,6 @@ static void __init kmap_init(void)
*/ */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
kmap_pte = kmap_get_fixmap_pte(kmap_vstart); kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
kmap_prot = PAGE_KERNEL;
} }
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
......
...@@ -53,6 +53,7 @@ ...@@ -53,6 +53,7 @@
#include <asm/numa.h> #include <asm/numa.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/init.h> #include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h> #include <asm/setup.h>
#include "mm_internal.h" #include "mm_internal.h"
...@@ -1206,26 +1207,13 @@ int kern_addr_valid(unsigned long addr) ...@@ -1206,26 +1207,13 @@ int kern_addr_valid(unsigned long addr)
static unsigned long probe_memory_block_size(void) static unsigned long probe_memory_block_size(void)
{ {
/* start from 2g */ unsigned long bz = MIN_MEMORY_BLOCK_SIZE;
unsigned long bz = 1UL<<31;
if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) { /* if system is UV or has 64GB of RAM or more, use large blocks */
pr_info("Using 2GB memory block size for large-memory system\n"); if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
return 2UL * 1024 * 1024 * 1024; bz = 2UL << 30; /* 2GB */
}
/* less than 64g installed */
if ((max_pfn << PAGE_SHIFT) < (16UL << 32))
return MIN_MEMORY_BLOCK_SIZE;
/* get the tail size */
while (bz > MIN_MEMORY_BLOCK_SIZE) {
if (!((max_pfn << PAGE_SHIFT) & (bz - 1)))
break;
bz >>= 1;
}
printk(KERN_DEBUG "memory block size : %ldMB\n", bz >> 20); pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);
return bz; return bz;
} }
......
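For example (assuming the default x86-64 section size of 128 MB for MIN_MEMORY_BLOCK_SIZE), a machine with 128 GB of RAM or any UV system now gets 2 GB memory blocks, while a 16 GB guest keeps 128 MB blocks; the old tail-size probing loop is gone.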
...@@ -120,11 +120,22 @@ void __init kasan_init(void) ...@@ -120,11 +120,22 @@ void __init kasan_init(void)
kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
(void *)KASAN_SHADOW_END); (void *)KASAN_SHADOW_END);
memset(kasan_zero_page, 0, PAGE_SIZE);
load_cr3(init_level4_pgt); load_cr3(init_level4_pgt);
__flush_tlb_all(); __flush_tlb_all();
init_task.kasan_depth = 0;
/*
* kasan_zero_page has been used as early shadow memory, thus it may
* contain some garbage. Now we can clear and write protect it, since
* after the TLB flush no one should write to it.
*/
memset(kasan_zero_page, 0, PAGE_SIZE);
for (i = 0; i < PTRS_PER_PTE; i++) {
pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
set_pte(&kasan_zero_pte[i], pte);
}
/* Flush TLBs again to be sure that write protection applied. */
__flush_tlb_all();
init_task.kasan_depth = 0;
pr_info("KernelAddressSanitizer initialized\n"); pr_info("KernelAddressSanitizer initialized\n");
} }
...@@ -465,46 +465,67 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) ...@@ -465,46 +465,67 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
return true; return true;
} }
/*
* Mark all currently memblock-reserved physical memory (which covers the
* kernel's own memory ranges) as hot-unswappable.
*/
static void __init numa_clear_kernel_node_hotplug(void) static void __init numa_clear_kernel_node_hotplug(void)
{ {
int i, nid; nodemask_t reserved_nodemask = NODE_MASK_NONE;
nodemask_t numa_kernel_nodes = NODE_MASK_NONE; struct memblock_region *mb_region;
phys_addr_t start, end; int i;
struct memblock_region *r;
/* /*
* We have to do some preprocessing of memblock regions, to
* make them suitable for reservation.
*
* At this time, all memory regions reserved by memblock are * At this time, all memory regions reserved by memblock are
* used by the kernel. Set the nid in memblock.reserved will * used by the kernel, but those regions are not split up
* mark out all the nodes the kernel resides in. * along node boundaries yet, and don't necessarily have their
* node ID set yet either.
*
* So iterate over all memory known to the x86 architecture,
* and use those ranges to set the nid in memblock.reserved.
* This will split up the memblock regions along node
* boundaries and will set the node IDs as well.
*/ */
for (i = 0; i < numa_meminfo.nr_blks; i++) { for (i = 0; i < numa_meminfo.nr_blks; i++) {
struct numa_memblk *mb = &numa_meminfo.blk[i]; struct numa_memblk *mb = numa_meminfo.blk + i;
int ret;
memblock_set_node(mb->start, mb->end - mb->start, ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
&memblock.reserved, mb->nid); WARN_ON_ONCE(ret);
} }
/* /*
* Mark all kernel nodes. * Now go over all reserved memblock regions, to construct a
* node mask of all kernel reserved memory areas.
* *
* When booting with mem=nn[kMG] or in a kdump kernel, numa_meminfo * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
* may not include all the memblock.reserved memory ranges because * numa_meminfo might not include all memblock.reserved
* trim_snb_memory() reserves specific pages for Sandy Bridge graphics. * memory ranges, because quirks such as trim_snb_memory()
* reserve specific pages for Sandy Bridge graphics. ]
*/ */
for_each_memblock(reserved, r) for_each_memblock(reserved, mb_region) {
if (r->nid != MAX_NUMNODES) if (mb_region->nid != MAX_NUMNODES)
node_set(r->nid, numa_kernel_nodes); node_set(mb_region->nid, reserved_nodemask);
}
/* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */ /*
* Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
* belonging to the reserved node mask.
*
* Note that this will include memory regions that reside
* on nodes that contain kernel memory - entire nodes
* become hot-unpluggable:
*/
for (i = 0; i < numa_meminfo.nr_blks; i++) { for (i = 0; i < numa_meminfo.nr_blks; i++) {
nid = numa_meminfo.blk[i].nid; struct numa_memblk *mb = numa_meminfo.blk + i;
if (!node_isset(nid, numa_kernel_nodes))
continue;
start = numa_meminfo.blk[i].start; if (!node_isset(mb->nid, reserved_nodemask))
end = numa_meminfo.blk[i].end; continue;
memblock_clear_hotplug(start, end - start); memblock_clear_hotplug(mb->start, mb->end - mb->start);
} }
} }
......
...@@ -943,7 +943,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, ...@@ -943,7 +943,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
return -EINVAL; return -EINVAL;
} }
*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
cachemode2protval(pcm)); cachemode2protval(pcm));
return 0; return 0;
...@@ -959,7 +959,7 @@ int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, ...@@ -959,7 +959,7 @@ int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
/* Set prot based on lookup */ /* Set prot based on lookup */
pcm = lookup_memtype(pfn_t_to_phys(pfn)); pcm = lookup_memtype(pfn_t_to_phys(pfn));
*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
cachemode2protval(pcm)); cachemode2protval(pcm));
return 0; return 0;
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/cpufeature.h>
static int disable_nx; static int disable_nx;
...@@ -31,9 +32,8 @@ early_param("noexec", noexec_setup); ...@@ -31,9 +32,8 @@ early_param("noexec", noexec_setup);
void x86_configure_nx(void) void x86_configure_nx(void)
{ {
if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx) /* If disable_nx is set, clear NX on all new mappings going forward. */
__supported_pte_mask |= _PAGE_NX; if (disable_nx)
else
__supported_pte_mask &= ~_PAGE_NX; __supported_pte_mask &= ~_PAGE_NX;
} }
......
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#include <asm/nmi.h> #include <asm/nmi.h>
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/cpufeature.h>
#include "op_x86_model.h" #include "op_x86_model.h"
#include "op_counter.h" #include "op_counter.h"
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/cpufeature.h> #include <asm/cpufeatures.h>
#include <asm/cmpxchg.h> #include <asm/cmpxchg.h>
#include <asm/nops.h> #include <asm/nops.h>
......
...@@ -25,11 +25,11 @@ ...@@ -25,11 +25,11 @@
#define old_mmap sys_old_mmap #define old_mmap sys_old_mmap
#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ; #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
#include <asm/syscalls_32.h> #include <asm/syscalls_32.h>
#undef __SYSCALL_I386 #undef __SYSCALL_I386
#define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, #define __SYSCALL_I386(nr, sym, qual) [ nr ] = sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
......
...@@ -35,14 +35,11 @@ ...@@ -35,14 +35,11 @@
#define stub_execveat sys_execveat #define stub_execveat sys_execveat
#define stub_rt_sigreturn sys_rt_sigreturn #define stub_rt_sigreturn sys_rt_sigreturn
#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat) #define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
#define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
#include <asm/syscalls_64.h> #include <asm/syscalls_64.h>
#undef __SYSCALL_64 #undef __SYSCALL_64
#define __SYSCALL_64(nr, sym, compat) [ nr ] = sym, #define __SYSCALL_64(nr, sym, qual) [ nr ] = sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
......
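A minimal sketch of the two-pass x-macro pattern these tables rely on (the header name and syscall entries below are hypothetical stand-ins for the generated asm/syscalls_64.h):

/*
 * Hypothetical "example_syscalls_64.h", standing in for the generated header:
 *
 *	__SYSCALL_64(0, example_sys_read,  )
 *	__SYSCALL_64(1, example_sys_write, )
 */

/* first pass: emit a prototype for every entry point */
#define __SYSCALL_64(nr, sym, qual) extern long sym(void);
#include "example_syscalls_64.h"
#undef __SYSCALL_64

/* second pass: emit designated initializers, building the dispatch table */
#define __SYSCALL_64(nr, sym, qual) [nr] = sym,
static long (*example_table[])(void) = {
#include "example_syscalls_64.h"
};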
...@@ -9,14 +9,12 @@ ...@@ -9,14 +9,12 @@
#include <asm/types.h> #include <asm/types.h>
#ifdef __i386__ #ifdef __i386__
#define __SYSCALL_I386(nr, sym, compat) [nr] = 1, #define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
static char syscalls[] = { static char syscalls[] = {
#include <asm/syscalls_32.h> #include <asm/syscalls_32.h>
}; };
#else #else
#define __SYSCALL_64(nr, sym, compat) [nr] = 1, #define __SYSCALL_64(nr, sym, qual) [nr] = 1,
#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
static char syscalls[] = { static char syscalls[] = {
#include <asm/syscalls_64.h> #include <asm/syscalls_64.h>
}; };
......
...@@ -1431,7 +1431,7 @@ static int __init intel_pstate_init(void) ...@@ -1431,7 +1431,7 @@ static int __init intel_pstate_init(void)
if (!all_cpu_data) if (!all_cpu_data)
return -ENOMEM; return -ENOMEM;
if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) { if (static_cpu_has(X86_FEATURE_HWP) && !no_hwp) {
pr_info("intel_pstate: HWP enabled\n"); pr_info("intel_pstate: HWP enabled\n");
hwp_active++; hwp_active++;
} }
......
...@@ -931,7 +931,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags) ...@@ -931,7 +931,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
if (bio_flags & EXTENT_BIO_TREE_LOG) if (bio_flags & EXTENT_BIO_TREE_LOG)
return 0; return 0;
#ifdef CONFIG_X86 #ifdef CONFIG_X86
if (static_cpu_has_safe(X86_FEATURE_XMM4_2)) if (static_cpu_has(X86_FEATURE_XMM4_2))
return 0; return 0;
#endif #endif
return 1; return 1;
......
...@@ -2138,6 +2138,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr, ...@@ -2138,6 +2138,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn); unsigned long pfn);
int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn); pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
......
...@@ -566,10 +566,26 @@ static inline void clear_tlb_flush_pending(struct mm_struct *mm) ...@@ -566,10 +566,26 @@ static inline void clear_tlb_flush_pending(struct mm_struct *mm)
} }
#endif #endif
struct vm_special_mapping struct vm_fault;
{
const char *name; struct vm_special_mapping {
const char *name; /* The name, e.g. "[vdso]". */
/*
* If .fault is not provided, this points to a
* NULL-terminated array of pages that back the special mapping.
*
* This must not be NULL unless .fault is provided.
*/
struct page **pages; struct page **pages;
/*
* If non-NULL, then this is called to resolve page faults
* on the special mapping. If used, .pages is not checked.
*/
int (*fault)(const struct vm_special_mapping *sm,
struct vm_area_struct *vma,
struct vm_fault *vmf);
}; };
enum tlb_flush_reason { enum tlb_flush_reason {
......
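A minimal sketch of a user of the new hook (hypothetical names; only the .name/.pages/.fault semantics come from the structure above): a special mapping that resolves its page from the fault handler, so .pages can stay NULL:

static struct page *my_backing_page;	/* hypothetical, allocated elsewhere */

static int my_special_fault(const struct vm_special_mapping *sm,
			    struct vm_area_struct *vma, struct vm_fault *vmf)
{
	if (vmf->pgoff != 0)
		return VM_FAULT_SIGBUS;
	vmf->page = my_backing_page;
	get_page(vmf->page);
	return 0;
}

static const struct vm_special_mapping my_mapping = {
	.name  = "[my_special]",
	.fault = my_special_fault,	/* .pages is not checked when .fault is set */
};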
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
#include <linux/atomic.h> #include <linux/atomic.h>
#ifdef CONFIG_X86 #ifdef CONFIG_X86
#include <asm/processor.h> /* for boot_cpu_has below */ #include <asm/cpufeature.h> /* for boot_cpu_has below */
#endif #endif
#define TEST(bit, op, c_op, val) \ #define TEST(bit, op, c_op, val) \
......
...@@ -1550,9 +1550,30 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, ...@@ -1550,9 +1550,30 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
*/ */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn) unsigned long pfn)
{
return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_pfn);
/**
* vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
* @vma: user vma to map to
* @addr: target user address of this page
* @pfn: source kernel pfn
* @pgprot: pgprot flags for the inserted page
*
* This is exactly like vm_insert_pfn, except that it allows drivers to
 * override pgprot on a per-page basis.
*
* This only makes sense for IO mappings, and it makes no sense for
* cow mappings. In general, using multiple vmas is preferable;
* vm_insert_pfn_prot should only be used if using multiple VMAs is
* impractical.
*/
int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot)
{ {
int ret; int ret;
pgprot_t pgprot = vma->vm_page_prot;
/* /*
* Technically, architectures with pte_special can avoid all these * Technically, architectures with pte_special can avoid all these
* restrictions (same for remap_pfn_range). However we would like * restrictions (same for remap_pfn_range). However we would like
...@@ -1574,7 +1595,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, ...@@ -1574,7 +1595,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
return ret; return ret;
} }
EXPORT_SYMBOL(vm_insert_pfn); EXPORT_SYMBOL(vm_insert_pfn_prot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn) pfn_t pfn)
......
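As an illustrative sketch (hypothetical driver; only vm_insert_pfn_prot() itself comes from the hunk above), a .fault handler can now insert one pfn write-combined without modifying vma->vm_page_prot for the whole VMA:

static unsigned long my_mmio_base_pfn;	/* hypothetical, set at probe time */

static int my_mmio_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = my_mmio_base_pfn + vmf->pgoff;
	pgprot_t prot = pgprot_writecombine(vma->vm_page_prot);

	if (vm_insert_pfn_prot(vma, (unsigned long)vmf->virtual_address,
			       pfn, prot))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}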
...@@ -3042,11 +3042,16 @@ static int special_mapping_fault(struct vm_area_struct *vma, ...@@ -3042,11 +3042,16 @@ static int special_mapping_fault(struct vm_area_struct *vma,
pgoff_t pgoff; pgoff_t pgoff;
struct page **pages; struct page **pages;
if (vma->vm_ops == &legacy_special_mapping_vmops) if (vma->vm_ops == &legacy_special_mapping_vmops) {
pages = vma->vm_private_data; pages = vma->vm_private_data;
else } else {
pages = ((struct vm_special_mapping *)vma->vm_private_data)-> struct vm_special_mapping *sm = vma->vm_private_data;
pages;
if (sm->fault)
return sm->fault(sm, vma, vmf);
pages = sm->pages;
}
for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
pgoff--; pgoff--;
......
...@@ -4,15 +4,17 @@ include ../lib.mk ...@@ -4,15 +4,17 @@ include ../lib.mk
.PHONY: all all_32 all_64 warn_32bit_failure clean .PHONY: all all_32 all_64 warn_32bit_failure clean
TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall \
check_initial_reg_state
TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn test_syscall_vdso unwind_vdso \ TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \ test_FCMOV test_FCOMI test_FISTTP \
ldt_gdt \ ldt_gdt \
vdso_restorer vdso_restorer
TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32) BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64) BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
CFLAGS := -O2 -g -std=gnu99 -pthread -Wall CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
...@@ -40,7 +42,7 @@ clean: ...@@ -40,7 +42,7 @@ clean:
$(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c $(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c
$(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
$(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c $(TARGETS_C_64BIT_ALL:%=%_64): %_64: %.c
$(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
# x86_64 users should be encouraged to install 32-bit libraries # x86_64 users should be encouraged to install 32-bit libraries
...@@ -65,3 +67,9 @@ endif ...@@ -65,3 +67,9 @@ endif
sysret_ss_attrs_64: thunks.S sysret_ss_attrs_64: thunks.S
ptrace_syscall_32: raw_syscall_helper_32.S ptrace_syscall_32: raw_syscall_helper_32.S
test_syscall_vdso_32: thunks_32.S test_syscall_vdso_32: thunks_32.S
# check_initial_reg_state is special: it needs a custom entry, and it
# needs to be static so that its interpreter doesn't destroy its initial
# state.
check_initial_reg_state_32: CFLAGS += -Wl,-ereal_start -static
check_initial_reg_state_64: CFLAGS += -Wl,-ereal_start -static
/*
* check_initial_reg_state.c - check that execve sets the correct state
* Copyright (c) 2014-2016 Andrew Lutomirski
*
* This program is free software; you can redistribute it and/or modify
* it under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#define _GNU_SOURCE
#include <stdio.h>
unsigned long ax, bx, cx, dx, si, di, bp, sp, flags;
unsigned long r8, r9, r10, r11, r12, r13, r14, r15;
asm (
".pushsection .text\n\t"
".type real_start, @function\n\t"
".global real_start\n\t"
"real_start:\n\t"
#ifdef __x86_64__
"mov %rax, ax\n\t"
"mov %rbx, bx\n\t"
"mov %rcx, cx\n\t"
"mov %rdx, dx\n\t"
"mov %rsi, si\n\t"
"mov %rdi, di\n\t"
"mov %rbp, bp\n\t"
"mov %rsp, sp\n\t"
"mov %r8, r8\n\t"
"mov %r9, r9\n\t"
"mov %r10, r10\n\t"
"mov %r11, r11\n\t"
"mov %r12, r12\n\t"
"mov %r13, r13\n\t"
"mov %r14, r14\n\t"
"mov %r15, r15\n\t"
"pushfq\n\t"
"popq flags\n\t"
#else
"mov %eax, ax\n\t"
"mov %ebx, bx\n\t"
"mov %ecx, cx\n\t"
"mov %edx, dx\n\t"
"mov %esi, si\n\t"
"mov %edi, di\n\t"
"mov %ebp, bp\n\t"
"mov %esp, sp\n\t"
"pushfl\n\t"
"popl flags\n\t"
#endif
"jmp _start\n\t"
".size real_start, . - real_start\n\t"
".popsection");
int main()
{
int nerrs = 0;
if (sp == 0) {
printf("[FAIL]\tTest was built incorrectly\n");
return 1;
}
if (ax || bx || cx || dx || si || di || bp
#ifdef __x86_64__
|| r8 || r9 || r10 || r11 || r12 || r13 || r14 || r15
#endif
) {
printf("[FAIL]\tAll GPRs except SP should be 0\n");
#define SHOW(x) printf("\t" #x " = 0x%lx\n", x);
SHOW(ax);
SHOW(bx);
SHOW(cx);
SHOW(dx);
SHOW(si);
SHOW(di);
SHOW(bp);
SHOW(sp);
#ifdef __x86_64__
SHOW(r8);
SHOW(r9);
SHOW(r10);
SHOW(r11);
SHOW(r12);
SHOW(r13);
SHOW(r14);
SHOW(r15);
#endif
nerrs++;
} else {
printf("[OK]\tAll GPRs except SP are 0\n");
}
if (flags != 0x202) {
printf("[FAIL]\tFLAGS is 0x%lx, but it should be 0x202\n", flags);
nerrs++;
} else {
printf("[OK]\tFLAGS is 0x202\n");
}
return nerrs ? 1 : 0;
}