Commit d5f744f9 authored by Linus Torvalds

Merge tag 'x86-entry-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 entry code updates from Thomas Gleixner:

 - Convert the 32-bit syscalls to be pt_regs based, which removes the
   requirement to push all six potential arguments onto the stack and
   consolidates the interface with the 64-bit variant (sketched below)

 - The first small portion of the exception- and syscall-related entry
   code consolidation, which aims to address the recently discovered
   issues with RCU, int3, NMI and other exceptions that can interrupt
   any context. The bulk of the changes is still work in progress and
   aimed at 5.8.

 - A few lockdep namespace cleanups, which have been applied to this
   branch to keep the prerequisites for the ongoing work confined.
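
For readers new to the pt_regs convention referenced above: instead of the entry code pulling six values out of registers and passing them as ordinary C arguments, each syscall wrapper now takes a single struct pt_regs pointer and extracts what it needs. A minimal user-space sketch of the dispatch pattern, with hypothetical names (__ia32_sys_add, demo_table) and a struct far simpler than the kernel's:

#include <stdio.h>

/* Simplified register frame; the kernel's struct pt_regs has more fields. */
struct pt_regs { unsigned long bx, cx, dx, si, di, bp, ax; };

typedef long (*sys_call_ptr_t)(const struct pt_regs *);

/* The wrapper digs its own arguments out of the frame, zero-extending
 * each 32-bit value, instead of receiving six C arguments. */
static long __ia32_sys_add(const struct pt_regs *regs)
{
	return (long)((unsigned int)regs->bx + (unsigned int)regs->cx);
}

static const sys_call_ptr_t demo_table[] = { __ia32_sys_add };

int main(void)
{
	struct pt_regs regs = { .bx = 2, .cx = 40 };
	printf("%ld\n", demo_table[0](&regs)); /* prints 42 */
	return 0;
}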

* tag 'x86-entry-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (35 commits)
  x86/entry: Fix build error x86 with !CONFIG_POSIX_TIMERS
  lockdep: Rename trace_{hard,soft}{irq_context,irqs_enabled}()
  lockdep: Rename trace_softirqs_{on,off}()
  lockdep: Rename trace_hardirq_{enter,exit}()
  x86/entry: Rename ___preempt_schedule
  x86: Remove unneeded includes
  x86/entry: Drop asmlinkage from syscalls
  x86/entry/32: Enable pt_regs based syscalls
  x86/entry/32: Use IA32-specific wrappers for syscalls taking 64-bit arguments
  x86/entry/32: Rename 32-bit specific syscalls
  x86/entry/32: Clean up syscall_32.tbl
  x86/entry: Remove ABI prefixes from functions in syscall tables
  x86/entry/64: Add __SYSCALL_COMMON()
  x86/entry: Remove syscall qualifier support
  x86/entry/64: Remove ptregs qualifier from syscall table
  x86/entry: Move max syscall number calculation to syscallhdr.sh
  x86/entry/64: Split X32 syscall table into its own file
  x86/entry/64: Move sys_ni_syscall stub to common.c
  x86/entry/64: Use syscall wrappers for x32_rt_sigreturn
  x86/entry: Refactor SYS_NI macros
  ...
parents dbb381b6 290a4474
@@ -30,7 +30,6 @@ config X86_64
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE
select SWIOTLB
select ARCH_HAS_SYSCALL_WRAPPER
config FORCE_DYNAMIC_FTRACE
def_bool y
@@ -79,6 +78,7 @@ config X86
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
@@ -14,4 +14,5 @@ obj-y += vdso/
obj-y += vsyscall/
obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o
obj-$(CONFIG_X86_X32_ABI) += syscall_x32.o
@@ -333,20 +333,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
if (likely(nr < IA32_NR_syscalls)) {
nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
regs->ax = ia32_sys_call_table[nr](regs);
#else
/*
* It's possible that a 32-bit syscall implementation
* takes a 64-bit parameter but nonetheless assumes that
* the high bits are zero. Make sure we zero-extend all
* of the args.
*/
regs->ax = ia32_sys_call_table[nr](
(unsigned int)regs->bx, (unsigned int)regs->cx,
(unsigned int)regs->dx, (unsigned int)regs->si,
(unsigned int)regs->di, (unsigned int)regs->bp);
#endif /* CONFIG_IA32_EMULATION */
}
syscall_return_slowpath(regs);
@@ -438,3 +425,8 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
#endif
}
#endif
SYSCALL_DEFINE0(ni_syscall)
{
return -ENOSYS;
}
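
With ARCH_HAS_SYSCALL_WRAPPER now selected on 32-bit as well, the SYSCALL_DEFINE0(ni_syscall) above generates the ABI-prefixed entry points (__ia32_sys_ni_syscall and, on 64-bit, __x64_sys_ni_syscall) that the tables reference. A self-contained sketch of the wrapper-generation idea, using a hypothetical MY_SYSCALL_DEFINE0 macro that is far simpler than the kernel's:

#include <errno.h>
#include <stdio.h>

struct pt_regs { unsigned long ax; };

/* Emit a pt_regs-based entry point that forwards to the real body. */
#define MY_SYSCALL_DEFINE0(name)                                  \
	static long __do_sys_##name(void);                        \
	long __x64_sys_##name(const struct pt_regs *regs)         \
	{ (void)regs; return __do_sys_##name(); }                 \
	static long __do_sys_##name(void)

MY_SYSCALL_DEFINE0(ni_syscall)
{
	return -ENOSYS;
}

int main(void)
{
	struct pt_regs regs = { 0 };
	printf("%ld\n", __x64_sys_ni_syscall(&regs)); /* -38 (-ENOSYS) on Linux */
	return 0;
}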
@@ -1088,10 +1088,10 @@ SYM_FUNC_START(entry_INT80_32)
STACKLEAK_ERASE
restore_all:
TRACE_IRQS_IRET
TRACE_IRQS_ON
SWITCH_TO_ENTRY_STACK
CHECK_AND_APPLY_ESPFIX
.Lrestore_nocheck:
/* Switch back to user CR3 */
SWITCH_TO_USER_CR3 scratch_reg=%eax
@@ -1290,7 +1290,7 @@ SYM_CODE_END(simd_coprocessor_error)
SYM_CODE_START(device_not_available)
ASM_CLAC
pushl $-1 # mark this as an int
pushl $0
pushl $do_device_not_available
jmp common_exception
SYM_CODE_END(device_not_available)
@@ -1365,7 +1365,7 @@ SYM_CODE_END(divide_error)
SYM_CODE_START(machine_check)
ASM_CLAC
pushl $0
pushl machine_check_vector
pushl $do_mce
jmp common_exception
SYM_CODE_END(machine_check)
#endif
@@ -1531,7 +1531,7 @@ SYM_CODE_START(debug)
* Entry from sysenter is now handled in common_exception
*/
ASM_CLAC
pushl $-1 # mark this as an int
pushl $0
pushl $do_debug
jmp common_exception
SYM_CODE_END(debug)
@@ -1682,18 +1682,13 @@ SYM_CODE_END(nmi)
SYM_CODE_START(int3)
ASM_CLAC
pushl $-1 # mark this as an int
SAVE_ALL switch_stacks=1
ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
xorl %edx, %edx # zero error code
movl %esp, %eax # pt_regs pointer
call do_int3
jmp ret_from_exception
pushl $0
pushl $do_int3
jmp common_exception
SYM_CODE_END(int3)
SYM_CODE_START(general_protection)
ASM_CLAC
pushl $do_general_protection
jmp common_exception
SYM_CODE_END(general_protection)
@@ -174,7 +174,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
movq %rsp, %rsi
call do_syscall_64 /* returns with IRQs disabled */
TRACE_IRQS_IRETQ /* we're about to change IF */
TRACE_IRQS_ON /* return enables interrupts */
/*
* Try to use SYSRET instead of IRET if we're returning to
@@ -619,7 +619,7 @@ ret_from_intr:
.Lretint_user:
mov %rsp,%rdi
call prepare_exit_to_usermode
TRACE_IRQS_IRETQ
TRACE_IRQS_ON
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
@@ -4,29 +4,22 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <asm/asm-offsets.h>
#include <linux/syscalls.h>
#include <asm/unistd.h>
#include <asm/syscall.h>
#ifdef CONFIG_IA32_EMULATION
/* On X86_64, we use struct pt_regs * to pass parameters to syscalls */
#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
#define __sys_ni_syscall __ia32_sys_ni_syscall
#else /* CONFIG_IA32_EMULATION */
#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#define __sys_ni_syscall sys_ni_syscall
#endif /* CONFIG_IA32_EMULATION */
#define __SYSCALL_I386(nr, sym) extern long __ia32_##sym(const struct pt_regs *);
#include <asm/syscalls_32.h>
#undef __SYSCALL_I386
#define __SYSCALL_I386(nr, sym, qual) [nr] = sym,
#define __SYSCALL_I386(nr, sym) [nr] = __ia32_##sym,
__visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] = {
__visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
[0 ... __NR_syscall_compat_max] = &__sys_ni_syscall,
[0 ... __NR_ia32_syscall_max] = &__ia32_sys_ni_syscall,
#include <asm/syscalls_32.h>
};
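
The table above relies on two tricks that recur throughout these files: the generated header is included twice (once to declare the symbols, once to emit designated initializers), and a GCC range designator pre-fills every slot with the not-implemented stub so sparse tables stay safe. The kernel also adds an explicit '&' to work around the old compiler quirk noted in the comment; the sketch below omits it. In miniature, with hypothetical entries:

#include <stdio.h>

struct pt_regs { unsigned long bx; };
typedef long (*sys_call_ptr_t)(const struct pt_regs *);

static long __ia32_sys_ni_syscall(const struct pt_regs *regs)
{
	(void)regs;
	return -38; /* -ENOSYS */
}

static long __ia32_sys_exit(const struct pt_regs *regs)
{
	return (long)regs->bx;
}

/* Second-pass definition of the macro: fill slots by syscall number. */
#define __SYSCALL_I386(nr, sym) [nr] = __ia32_##sym,

static const sys_call_ptr_t demo_table[3] = {
	[0 ... 2] = __ia32_sys_ni_syscall, /* default every slot (GCC range) */
	__SYSCALL_I386(1, sys_exit)        /* generated entries override     */
};

int main(void)
{
	struct pt_regs regs = { .bx = 7 };
	printf("%ld %ld\n", demo_table[0](&regs), demo_table[1](&regs));
	return 0;
}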
@@ -5,24 +5,17 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/syscall.h>
extern asmlinkage long sys_ni_syscall(void);
#define __SYSCALL_X32(nr, sym)
#define __SYSCALL_COMMON(nr, sym) __SYSCALL_64(nr, sym)
SYSCALL_DEFINE0(ni_syscall)
{
return sys_ni_syscall();
}
#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
#define __SYSCALL_X32(nr, sym, qual) __SYSCALL_64(nr, sym, qual)
#define __SYSCALL_64(nr, sym) extern long __x64_##sym(const struct pt_regs *);
#include <asm/syscalls_64.h>
#undef __SYSCALL_64
#undef __SYSCALL_X32
#define __SYSCALL_64(nr, sym, qual) [nr] = sym,
#define __SYSCALL_X32(nr, sym, qual)
#define __SYSCALL_64(nr, sym) [nr] = __x64_##sym,
asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
/*
@@ -32,25 +25,3 @@ asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
[0 ... __NR_syscall_max] = &__x64_sys_ni_syscall,
#include <asm/syscalls_64.h>
};
#undef __SYSCALL_64
#undef __SYSCALL_X32
#ifdef CONFIG_X86_X32_ABI
#define __SYSCALL_64(nr, sym, qual)
#define __SYSCALL_X32(nr, sym, qual) [nr] = sym,
asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_syscall_x32_max+1] = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
[0 ... __NR_syscall_x32_max] = &__x64_sys_ni_syscall,
#include <asm/syscalls_64.h>
};
#undef __SYSCALL_64
#undef __SYSCALL_X32
#endif
// SPDX-License-Identifier: GPL-2.0
/* System call table for x32 ABI. */
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
#include <asm/unistd.h>
#include <asm/syscall.h>
#define __SYSCALL_64(nr, sym)
#define __SYSCALL_X32(nr, sym) extern long __x32_##sym(const struct pt_regs *);
#define __SYSCALL_COMMON(nr, sym) extern long __x64_##sym(const struct pt_regs *);
#include <asm/syscalls_64.h>
#undef __SYSCALL_X32
#undef __SYSCALL_COMMON
#define __SYSCALL_X32(nr, sym) [nr] = __x32_##sym,
#define __SYSCALL_COMMON(nr, sym) [nr] = __x64_##sym,
asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_x32_syscall_max+1] = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
[0 ... __NR_x32_syscall_max] = &__x64_sys_ni_syscall,
#include <asm/syscalls_64.h>
};
@@ -15,14 +15,21 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
echo "#define ${fileguard} 1"
echo ""
max=0
while read nr abi name entry ; do
if [ -z "$offset" ]; then
echo "#define __NR_${prefix}${name} $nr"
else
echo "#define __NR_${prefix}${name} ($offset + $nr)"
fi
max=$nr
done
echo ""
echo "#ifdef __KERNEL__"
echo "#define __NR_${prefix}syscall_max $max"
echo "#endif"
echo ""
echo "#endif /* ${fileguard} */"
) > "$out"
@@ -9,15 +9,7 @@ syscall_macro() {
local nr="$2"
local entry="$3"
# Entry can be either just a function name or "function/qualifier"
real_entry="${entry%%/*}"
if [ "$entry" = "$real_entry" ]; then
qualifier=
else
qualifier=${entry#*/}
fi
echo "__SYSCALL_${abi}($nr, $real_entry, $qualifier)"
echo "__SYSCALL_${abi}($nr, $entry)"
}
emit() {
@@ -25,27 +17,15 @@ emit() {
local nr="$2"
local entry="$3"
local compat="$4"
local umlentry=""
if [ "$abi" != "I386" -a -n "$compat" ]; then
echo "a compat entry ($abi: $compat) for a 64-bit syscall makes no sense" >&2
exit 1
fi
# For CONFIG_UML, we need to strip the __x64_sys prefix
if [ "$abi" = "64" -a "${entry}" != "${entry#__x64_sys}" ]; then
umlentry="sys${entry#__x64_sys}"
fi
if [ -z "$compat" ]; then
if [ -n "$entry" -a -z "$umlentry" ]; then
syscall_macro "$abi" "$nr" "$entry"
elif [ -n "$umlentry" ]; then # implies -n "$entry"
echo "#ifdef CONFIG_X86"
if [ -n "$entry" ]; then
syscall_macro "$abi" "$nr" "$entry"
echo "#else /* CONFIG_UML */"
syscall_macro "$abi" "$nr" "$umlentry"
echo "#endif"
fi
else
echo "#ifdef CONFIG_X86_32"
@@ -61,24 +41,6 @@ emit() {
grep '^[0-9]' "$in" | sort -n | (
while read nr abi name entry compat; do
abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
if [ "$abi" = "COMMON" -o "$abi" = "64" ]; then
emit 64 "$nr" "$entry" "$compat"
if [ "$abi" = "COMMON" ]; then
# COMMON means that this syscall exists in the same form for
# 64-bit and X32.
echo "#ifdef CONFIG_X86_X32_ABI"
emit X32 "$nr" "$entry" "$compat"
echo "#endif"
fi
elif [ "$abi" = "X32" ]; then
echo "#ifdef CONFIG_X86_X32_ABI"
emit X32 "$nr" "$entry" "$compat"
echo "#endif"
elif [ "$abi" = "I386" ]; then
emit "$abi" "$nr" "$entry" "$compat"
else
echo "Unknown abi $abi" >&2
exit 1
fi
done
) > "$out"
@@ -35,9 +35,9 @@ SYM_CODE_END(\name)
#endif
#ifdef CONFIG_PREEMPTION
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
EXPORT_SYMBOL(___preempt_schedule)
EXPORT_SYMBOL(___preempt_schedule_notrace)
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
EXPORT_SYMBOL(preempt_schedule_thunk)
EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
#endif
@@ -47,10 +47,10 @@ SYM_FUNC_END(\name)
#endif
#ifdef CONFIG_PREEMPTION
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
EXPORT_SYMBOL(___preempt_schedule)
EXPORT_SYMBOL(___preempt_schedule_notrace)
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
EXPORT_SYMBOL(preempt_schedule_thunk)
EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) \
@@ -13,6 +13,7 @@
*/
#undef CONFIG_64BIT
#undef CONFIG_X86_64
#undef CONFIG_COMPAT
#undef CONFIG_PGTABLE_LEVELS
#undef CONFIG_ILLEGAL_POINTER_VALUE
#undef CONFIG_SPARSEMEM_VMEMMAP
@@ -3,7 +3,7 @@
# Makefile for the ia32 kernel emulation subsystem.
#
obj-$(CONFIG_IA32_EMULATION) := sys_ia32.o ia32_signal.o
obj-$(CONFIG_IA32_EMULATION) := ia32_signal.o
obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
@@ -36,7 +36,7 @@ extern void native_init_IRQ(void);
extern void handle_irq(struct irq_desc *desc, struct pt_regs *regs);
extern __visible unsigned int do_IRQ(struct pt_regs *regs);
extern __visible void do_IRQ(struct pt_regs *regs);
extern void init_ISA_irqs(void);
@@ -238,9 +238,6 @@ extern void mce_disable_bank(int bank);
/*
* Exception handler
*/
/* Call the installed machine check handler for this CPU setup. */
extern void (*machine_check_vector)(struct pt_regs *, long error_code);
void do_machine_check(struct pt_regs *, long);
/*
@@ -103,14 +103,14 @@ static __always_inline bool should_resched(int preempt_offset)
}
#ifdef CONFIG_PREEMPTION
extern asmlinkage void ___preempt_schedule(void);
extern asmlinkage void preempt_schedule_thunk(void);
# define __preempt_schedule() \
asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
extern asmlinkage void preempt_schedule(void);
extern asmlinkage void ___preempt_schedule_notrace(void);
extern asmlinkage void preempt_schedule_notrace_thunk(void);
# define __preempt_schedule_notrace() \
asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT)
extern asmlinkage void preempt_schedule_notrace(void);
#endif
@@ -17,9 +17,4 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
struct pt_regs *regs, unsigned long mask);
#ifdef CONFIG_X86_X32_ABI
asmlinkage long sys32_x32_rt_sigreturn(void);
#endif
#endif /* _ASM_X86_SIGHANDLING_H */
@@ -13,23 +13,14 @@
#include <uapi/linux/audit.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <asm/asm-offsets.h> /* For NR_syscalls */
#include <asm/thread_info.h> /* for TS_COMPAT */
#include <asm/unistd.h>
#ifdef CONFIG_X86_64
typedef asmlinkage long (*sys_call_ptr_t)(const struct pt_regs *);
#else
typedef asmlinkage long (*sys_call_ptr_t)(unsigned long, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
#endif /* CONFIG_X86_64 */
typedef long (*sys_call_ptr_t)(const struct pt_regs *);
extern const sys_call_ptr_t sys_call_table[];
#if defined(CONFIG_X86_32)
#define ia32_sys_call_table sys_call_table
#define __NR_syscall_compat_max __NR_syscall_max
#define IA32_NR_syscalls NR_syscalls
#endif
#if defined(CONFIG_IA32_EMULATION)
@@ -8,42 +8,8 @@
#ifndef _ASM_X86_SYSCALLS_H
#define _ASM_X86_SYSCALLS_H
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/signal.h>
#include <linux/types.h>
/* Common in X86_32 and X86_64 */
/* kernel/ioport.c */
long ksys_ioperm(unsigned long from, unsigned long num, int turn_on);
#ifdef CONFIG_X86_32
/*
* These definitions are only valid on pure 32-bit systems; x86-64 uses a
* different syscall calling convention
*/
asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
asmlinkage long sys_iopl(unsigned int);
/* kernel/ldt.c */
asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
/* kernel/signal.c */
asmlinkage long sys_rt_sigreturn(void);
/* kernel/tls.c */
asmlinkage long sys_set_thread_area(struct user_desc __user *);
asmlinkage long sys_get_thread_area(struct user_desc __user *);
/* X86_32 only */
/* kernel/signal.c */
asmlinkage long sys_sigreturn(void);
/* kernel/vm86_32.c */
struct vm86_struct;
asmlinkage long sys_vm86old(struct vm86_struct __user *);
asmlinkage long sys_vm86(unsigned long, unsigned long);
#endif /* CONFIG_X86_32 */
#endif /* _ASM_X86_SYSCALLS_H */
@@ -76,27 +76,24 @@ dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long err
dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code);
#ifdef CONFIG_X86_64
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long address);
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
void __init trap_init(void);
#endif
dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code);
dotraplinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code);
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code);
dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code);
#ifdef CONFIG_X86_MCE
dotraplinkage void do_machine_check(struct pt_regs *regs, long error_code);
#endif
dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code);
#endif
dotraplinkage void do_mce(struct pt_regs *regs, long error_code);
#ifdef CONFIG_X86_64
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
void __init trap_init(void);
#endif
static inline int get_si_code(unsigned long condition)
{
if (condition & DR_STEP)
@@ -13,10 +13,13 @@
# define __ARCH_WANT_SYS_OLD_MMAP
# define __ARCH_WANT_SYS_OLD_SELECT
# define __NR_ia32_syscall_max __NR_syscall_max
# else
# include <asm/unistd_64.h>
# include <asm/unistd_64_x32.h>
# include <asm/unistd_32_ia32.h>
# define __ARCH_WANT_SYS_TIME
# define __ARCH_WANT_SYS_UTIME
# define __ARCH_WANT_COMPAT_SYS_PREADV64
@@ -26,6 +29,10 @@
# endif
# define NR_syscalls (__NR_syscall_max + 1)
# define X32_NR_syscalls (__NR_x32_syscall_max + 1)
# define IA32_NR_syscalls (__NR_ia32_syscall_max + 1)
# define __ARCH_WANT_NEW_STAT
# define __ARCH_WANT_OLD_READDIR
# define __ARCH_WANT_OLD_STAT
@@ -52,6 +52,8 @@ obj-y += setup.o x86_init.o i8259.o irqinit.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o
obj-$(CONFIG_X86_32) += sys_ia32.o
obj-$(CONFIG_IA32_EMULATION) += sys_ia32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o
@@ -7,11 +7,6 @@
#include <asm/ucontext.h>
#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
static char syscalls[] = {
#include <asm/syscalls_32.h>
};
/* workaround for a warning with -Wmissing-prototypes */
void foo(void);
@@ -63,10 +58,6 @@ void foo(void)
OFFSET(stack_canary_offset, stack_canary, canary);
#endif
BLANK();
DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
DEFINE(NR_syscalls, sizeof(syscalls));
BLANK();
DEFINE(EFI_svam, offsetof(efi_runtime_services_t, set_virtual_address_map));
}
@@ -5,30 +5,6 @@
#include <asm/ia32.h>
#define __SYSCALL_64(nr, sym, qual) [nr] = 1,
#define __SYSCALL_X32(nr, sym, qual)
static char syscalls_64[] = {
#include <asm/syscalls_64.h>
};
#undef __SYSCALL_64
#undef __SYSCALL_X32
#ifdef CONFIG_X86_X32_ABI
#define __SYSCALL_64(nr, sym, qual)
#define __SYSCALL_X32(nr, sym, qual) [nr] = 1,
static char syscalls_x32[] = {
#include <asm/syscalls_64.h>
};
#undef __SYSCALL_64
#undef __SYSCALL_X32
#endif
#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
static char syscalls_ia32[] = {
#include <asm/syscalls_32.h>
};
#undef __SYSCALL_I386
#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#include <asm/kvm_para.h>
#endif
@@ -90,17 +66,5 @@ int main(void)
DEFINE(stack_canary_offset, offsetof(struct fixed_percpu_data, stack_canary));
BLANK();
#endif
DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
DEFINE(NR_syscalls, sizeof(syscalls_64));
#ifdef CONFIG_X86_X32_ABI
DEFINE(__NR_syscall_x32_max, sizeof(syscalls_x32) - 1);
DEFINE(X32_NR_syscalls, sizeof(syscalls_x32));
#endif
DEFINE(__NR_syscall_compat_max, sizeof(syscalls_ia32) - 1);
DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));
return 0;
}
@@ -1215,8 +1215,14 @@ static void __mc_scan_banks(struct mce *m, struct mce *final,
* On Intel systems this is entered on all CPUs in parallel through
* MCE broadcast. However some CPUs might be broken beyond repair,
* so be always careful when synchronizing with others.
*
* Tracing and kprobes are disabled: if we interrupted a kernel context
* with IF=1, we need to minimize stack usage. There are also recursion
* issues: if the machine check was due to a failure of the memory
* backing the user stack, tracing that reads the user stack will cause
* potentially infinite recursion.
*/
void do_machine_check(struct pt_regs *regs, long error_code)
void notrace do_machine_check(struct pt_regs *regs, long error_code)
{
DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
DECLARE_BITMAP(toclear, MAX_NR_BANKS);
@@ -1362,6 +1368,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
ist_exit(regs);
}
EXPORT_SYMBOL_GPL(do_machine_check);
NOKPROBE_SYMBOL(do_machine_check);
#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int flags)
@@ -1896,10 +1903,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
void (*machine_check_vector)(struct pt_regs *, long error_code) =
unexpected_machine_check;
dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
dotraplinkage notrace void do_mce(struct pt_regs *regs, long error_code)
{
machine_check_vector(regs, error_code);
}
NOKPROBE_SYMBOL(do_mce);
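
do_mce() is only an indirection through machine_check_vector, the per-setup handler pointer that now lives in the mce-internal header (see the hunk below) and defaults to unexpected_machine_check() until a real handler is installed. The pattern in miniature, as a stand-alone sketch:

#include <stdio.h>

struct pt_regs { int dummy; };

static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	(void)regs;
	printf("unexpected machine check, code %ld\n", error_code);
}

/* Installed machine check handler for this CPU setup. */
static void (*machine_check_vector)(struct pt_regs *, long) =
	unexpected_machine_check;

static void do_mce(struct pt_regs *regs, long error_code)
{
	machine_check_vector(regs, error_code);
}

int main(void)
{
	struct pt_regs regs = { 0 };
	do_mce(&regs, 0);
	return 0;
}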
/*
* Called for each booted CPU to set up machine checks.
@@ -8,6 +8,9 @@
#include <linux/device.h>
#include <asm/mce.h>
/* Pointer to the installed machine check handler for this CPU setup. */
extern void (*machine_check_vector)(struct pt_regs *, long error_code);
enum severity_level {
MCE_NO_SEVERITY,
MCE_DEFERRED_SEVERITY,
@@ -230,7 +230,7 @@ u64 arch_irq_stat(void)
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
__visible void __irq_entry do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
struct irq_desc * desc;
@@ -263,7 +263,6 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
exiting_irq();
set_irq_regs(old_regs);
return 1;
}
#ifdef CONFIG_X86_LOCAL_APIC
@@ -27,7 +27,6 @@
#include <asm/tlb.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/pgtable_areas.h>
/* This is a multiple of PAGE_SIZE. */
@@ -28,7 +28,6 @@
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
@@ -49,7 +49,6 @@
#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
@@ -48,7 +48,6 @@
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
@@ -42,8 +42,6 @@
#endif /* CONFIG_X86_64 */
#include <asm/syscall.h>
#include <asm/syscalls.h>
#include <asm/sigframe.h>
#include <asm/signal.h>
@@ -859,7 +857,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
}
#ifdef CONFIG_X86_X32_ABI
asmlinkage long sys32_x32_rt_sigreturn(void)
COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe_x32 __user *frame;
@@ -51,20 +51,80 @@
#define AA(__x) ((unsigned long)(__x))
COMPAT_SYSCALL_DEFINE3(x86_truncate64, const char __user *, filename,
SYSCALL_DEFINE3(ia32_truncate64, const char __user *, filename,
unsigned long, offset_low, unsigned long, offset_high)
{
return ksys_truncate(filename,
((loff_t) offset_high << 32) | offset_low);
}
COMPAT_SYSCALL_DEFINE3(x86_ftruncate64, unsigned int, fd,
SYSCALL_DEFINE3(ia32_ftruncate64, unsigned int, fd,
unsigned long, offset_low, unsigned long, offset_high)
{
return ksys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
}
/* warning: next two assume little endian */
SYSCALL_DEFINE5(ia32_pread64, unsigned int, fd, char __user *, ubuf,
u32, count, u32, poslo, u32, poshi)
{
return ksys_pread64(fd, ubuf, count,
((loff_t)AA(poshi) << 32) | AA(poslo));
}
SYSCALL_DEFINE5(ia32_pwrite64, unsigned int, fd, const char __user *, ubuf,
u32, count, u32, poslo, u32, poshi)
{
return ksys_pwrite64(fd, ubuf, count,
((loff_t)AA(poshi) << 32) | AA(poslo));
}
/*
* Some system calls that need sign extended arguments. This could be
* done by a generic wrapper.
*/
SYSCALL_DEFINE6(ia32_fadvise64_64, int, fd, __u32, offset_low,
__u32, offset_high, __u32, len_low, __u32, len_high,
int, advice)
{
return ksys_fadvise64_64(fd,
(((u64)offset_high)<<32) | offset_low,
(((u64)len_high)<<32) | len_low,
advice);
}
SYSCALL_DEFINE4(ia32_readahead, int, fd, unsigned int, off_lo,
unsigned int, off_hi, size_t, count)
{
return ksys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
}
SYSCALL_DEFINE6(ia32_sync_file_range, int, fd, unsigned int, off_low,
unsigned int, off_hi, unsigned int, n_low,
unsigned int, n_hi, int, flags)
{
return ksys_sync_file_range(fd,
((u64)off_hi << 32) | off_low,
((u64)n_hi << 32) | n_low, flags);
}
SYSCALL_DEFINE5(ia32_fadvise64, int, fd, unsigned int, offset_lo,
unsigned int, offset_hi, size_t, len, int, advice)
{
return ksys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
len, advice);
}
SYSCALL_DEFINE6(ia32_fallocate, int, fd, int, mode,
unsigned int, offset_lo, unsigned int, offset_hi,
unsigned int, len_lo, unsigned int, len_hi)
{
return ksys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
((u64)len_hi << 32) | len_lo);
}
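
All of the wrappers above reassemble a 64-bit value from the two 32-bit halves a compat task passes in separate registers. The ((u64)hi << 32) | lo pattern is easy to verify in isolation; a minimal sketch with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* Combine the high and low 32-bit halves of a 64-bit argument. */
static uint64_t combine_hi_lo(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* A file offset of 0x100000004 arrives as hi=0x1, lo=0x4. */
	printf("0x%llx\n", (unsigned long long)combine_hi_lo(0x1, 0x4));
	return 0;
}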
#ifdef CONFIG_IA32_EMULATION
/*
* Another set for IA32/LFS -- x86_64 struct stat is different due to
* support for 64bit inode numbers.
@@ -97,7 +157,7 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
return 0;
}
COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename,
COMPAT_SYSCALL_DEFINE2(ia32_stat64, const char __user *, filename,
struct stat64 __user *, statbuf)
{
struct kstat stat;
@@ -108,7 +168,7 @@ COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename,
return ret;
}
COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename,
COMPAT_SYSCALL_DEFINE2(ia32_lstat64, const char __user *, filename,
struct stat64 __user *, statbuf)
{
struct kstat stat;
@@ -118,7 +178,7 @@ COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename,
return ret;
}
COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd,
COMPAT_SYSCALL_DEFINE2(ia32_fstat64, unsigned int, fd,
struct stat64 __user *, statbuf)
{
struct kstat stat;
@@ -128,7 +188,7 @@ COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd,
return ret;
}
COMPAT_SYSCALL_DEFINE4(x86_fstatat, unsigned int, dfd,
COMPAT_SYSCALL_DEFINE4(ia32_fstatat64, unsigned int, dfd,
const char __user *, filename,
struct stat64 __user *, statbuf, int, flag)
{
@@ -156,7 +216,7 @@ struct mmap_arg_struct32 {
unsigned int offset;
};
COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg)
COMPAT_SYSCALL_DEFINE1(ia32_mmap, struct mmap_arg_struct32 __user *, arg)
{
struct mmap_arg_struct32 a;
@@ -170,70 +230,10 @@ COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg)
a.offset>>PAGE_SHIFT);
}
/* warning: next two assume little endian */
COMPAT_SYSCALL_DEFINE5(x86_pread, unsigned int, fd, char __user *, ubuf,
u32, count, u32, poslo, u32, poshi)
{
return ksys_pread64(fd, ubuf, count,
((loff_t)AA(poshi) << 32) | AA(poslo));
}
COMPAT_SYSCALL_DEFINE5(x86_pwrite, unsigned int, fd, const char __user *, ubuf,
u32, count, u32, poslo, u32, poshi)
{
return ksys_pwrite64(fd, ubuf, count,
((loff_t)AA(poshi) << 32) | AA(poslo));
}
/*
* Some system calls that need sign extended arguments. This could be
* done by a generic wrapper.
*/
COMPAT_SYSCALL_DEFINE6(x86_fadvise64_64, int, fd, __u32, offset_low,
__u32, offset_high, __u32, len_low, __u32, len_high,
int, advice)
{
return ksys_fadvise64_64(fd,
(((u64)offset_high)<<32) | offset_low,
(((u64)len_high)<<32) | len_low,
advice);
}
COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo,
unsigned int, off_hi, size_t, count)
{
return ksys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
}
COMPAT_SYSCALL_DEFINE6(x86_sync_file_range, int, fd, unsigned int, off_low,
unsigned int, off_hi, unsigned int, n_low,
unsigned int, n_hi, int, flags)
{
return ksys_sync_file_range(fd,
((u64)off_hi << 32) | off_low,
((u64)n_hi << 32) | n_low, flags);
}
COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo,
unsigned int, offset_hi, size_t, len, int, advice)
{
return ksys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
len, advice);
}
COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
unsigned int, offset_lo, unsigned int, offset_hi,
unsigned int, len_lo, unsigned int, len_hi)
{
return ksys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
((u64)len_hi << 32) | len_lo);
}
/*
* The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS
*/
COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
COMPAT_SYSCALL_DEFINE5(ia32_clone, unsigned long, clone_flags,
unsigned long, newsp, int __user *, parent_tidptr,
unsigned long, tls_val, int __user *, child_tidptr)
{
@@ -252,3 +252,4 @@ COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
return _do_fork(&args);
}
#endif /* CONFIG_IA32_EMULATION */
@@ -21,7 +21,6 @@
#include <asm/elf.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
/*
* Align a virtual address to avoid aliasing in the I$ on AMD F15h.
......
@@ -572,14 +572,20 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
return;
/*
* Use ist_enter despite the fact that we don't use an IST stack.
* We can be called from a kprobe in non-CONTEXT_KERNEL kernel
* mode or even during context tracking state changes.
* Unlike any other non-IST entry, we can be called from a kprobe in
* non-CONTEXT_KERNEL kernel mode or even during context tracking
* state changes. Make sure that we wake up RCU even if we're coming
* from kernel code.
*
* This means that we can't schedule. That's okay.
* This means that we can't schedule even if we came from a
* preemptible kernel context. That's okay.
*/
ist_enter(regs);
if (!user_mode(regs)) {
rcu_nmi_enter();
preempt_disable();
}
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
SIGTRAP) == NOTIFY_STOP)
@@ -600,7 +606,10 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
cond_local_irq_disable(regs);
exit:
ist_exit(regs);
if (!user_mode(regs)) {
preempt_enable_no_resched();
rcu_nmi_exit();
}
}
NOKPROBE_SYMBOL(do_int3);
@@ -862,7 +871,25 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
cond_local_irq_enable(regs);
/*
* This addresses a Pentium Pro Erratum:
*
* PROBLEM: If the APIC subsystem is configured in mixed mode with
* Virtual Wire mode implemented through the local APIC, an
* interrupt vector of 0Fh (Intel reserved encoding) may be
* generated by the local APIC (Int 15). This vector may be
* generated upon receipt of a spurious interrupt (an interrupt
* which is removed before the system receives the INTA sequence)
* instead of the programmed 8259 spurious interrupt vector.
*
* IMPLICATION: The spurious interrupt vector programmed in the
* 8259 is normally handled by an operating system's spurious
* interrupt handler. However, a vector of 0Fh is unknown to some
* operating systems, which would crash if this erratum occurred.
*
* In theory this could be limited to 32bit, but the handler is not
* hurting and who knows which other CPUs suffer from this.
*/
}
dotraplinkage void
@@ -21,6 +21,7 @@ obj-y += checksum_32.o syscalls_32.o
obj-$(CONFIG_ELF_CORE) += elfcore.o
subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
subarch-y += ../kernel/sys_ia32.o
else
@@ -7,7 +7,7 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <generated/user_constants.h>
#include <asm/unistd.h>
#include <asm/syscall.h>
#define __NO_STUBS
@@ -26,11 +26,11 @@
#define old_mmap sys_old_mmap
#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
#define __SYSCALL_I386(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
#include <asm/syscalls_32.h>
#undef __SYSCALL_I386
#define __SYSCALL_I386(nr, sym, qual) [ nr ] = sym,
#define __SYSCALL_I386(nr, sym) [ nr ] = sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
@@ -7,7 +7,7 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <generated/user_constants.h>
#include <asm/unistd.h>
#include <asm/syscall.h>
#define __NO_STUBS
@@ -36,11 +36,14 @@
#define stub_execveat sys_execveat
#define stub_rt_sigreturn sys_rt_sigreturn
#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
#define __SYSCALL_X32(nr, sym)
#define __SYSCALL_COMMON(nr, sym) __SYSCALL_64(nr, sym)
#define __SYSCALL_64(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
#include <asm/syscalls_64.h>
#undef __SYSCALL_64
#define __SYSCALL_64(nr, sym, qual) [ nr ] = sym,
#define __SYSCALL_64(nr, sym) [ nr ] = sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
@@ -9,18 +9,6 @@
#include <linux/ptrace.h>
#include <asm/types.h>
#ifdef __i386__
#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
static char syscalls[] = {
#include <asm/syscalls_32.h>
};
#else
#define __SYSCALL_64(nr, sym, qual) [nr] = 1,
static char syscalls[] = {
#include <asm/syscalls_64.h>
};
#endif
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -94,7 +82,4 @@ void foo(void)
DEFINE(UM_PROT_READ, PROT_READ);
DEFINE(UM_PROT_WRITE, PROT_WRITE);
DEFINE(UM_PROT_EXEC, PROT_EXEC);
DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
DEFINE(NR_syscalls, sizeof(syscalls));
}
@@ -37,7 +37,7 @@ extern void rcu_nmi_exit(void);
do { \
account_irq_enter_time(current); \
preempt_count_add(HARDIRQ_OFFSET); \
trace_hardirq_enter(); \
lockdep_hardirq_enter(); \
} while (0)
/*
@@ -50,7 +50,7 @@ extern void irq_enter(void);
*/
#define __irq_exit() \
do { \
trace_hardirq_exit(); \
lockdep_hardirq_exit(); \
account_irq_exit_time(current); \
preempt_count_sub(HARDIRQ_OFFSET); \
} while (0)
@@ -74,12 +74,12 @@ extern void irq_exit(void);
BUG_ON(in_nmi()); \
preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
rcu_nmi_enter(); \
trace_hardirq_enter(); \
lockdep_hardirq_enter(); \
} while (0)
#define nmi_exit() \
do { \
trace_hardirq_exit(); \
lockdep_hardirq_exit(); \
rcu_nmi_exit(); \
BUG_ON(!in_nmi()); \
preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
@@ -15,15 +15,15 @@
#include <linux/typecheck.h>
#include <asm/irqflags.h>
/* Currently trace_softirqs_on/off is used only by lockdep */
/* Currently lockdep_softirqs_on/off is used only by lockdep */
#ifdef CONFIG_PROVE_LOCKING
extern void trace_softirqs_on(unsigned long ip);
extern void trace_softirqs_off(unsigned long ip);
extern void lockdep_softirqs_on(unsigned long ip);
extern void lockdep_softirqs_off(unsigned long ip);
extern void lockdep_hardirqs_on(unsigned long ip);
extern void lockdep_hardirqs_off(unsigned long ip);
#else
static inline void trace_softirqs_on(unsigned long ip) { }
static inline void trace_softirqs_off(unsigned long ip) { }
static inline void lockdep_softirqs_on(unsigned long ip) { }
static inline void lockdep_softirqs_off(unsigned long ip) { }
static inline void lockdep_hardirqs_on(unsigned long ip) { }
static inline void lockdep_hardirqs_off(unsigned long ip) { }
#endif
@@ -31,20 +31,20 @@
#ifdef CONFIG_TRACE_IRQFLAGS
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
# define trace_hardirq_context(p) ((p)->hardirq_context)
# define trace_softirq_context(p) ((p)->softirq_context)
# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() \
# define lockdep_hardirq_context(p) ((p)->hardirq_context)
# define lockdep_softirq_context(p) ((p)->softirq_context)
# define lockdep_hardirqs_enabled(p) ((p)->hardirqs_enabled)
# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
# define lockdep_hardirq_enter() \
do { \
if (!current->hardirq_context++) \
current->hardirq_threaded = 0; \
} while (0)
# define trace_hardirq_threaded() \
# define lockdep_hardirq_threaded() \
do { \
current->hardirq_threaded = 1; \
} while (0)
# define trace_hardirq_exit() \
# define lockdep_hardirq_exit() \
do { \
current->hardirq_context--; \
} while (0)
@@ -93,13 +93,13 @@ do { \
#else
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
# define trace_hardirq_context(p) 0
# define trace_softirq_context(p) 0
# define trace_hardirqs_enabled(p) 0
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_threaded() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
# define lockdep_hardirq_context(p) 0
# define lockdep_softirq_context(p) 0
# define lockdep_hardirqs_enabled(p) 0
# define lockdep_softirqs_enabled(p) 0
# define lockdep_hardirq_enter() do { } while (0)
# define lockdep_hardirq_threaded() do { } while (0)
# define lockdep_hardirq_exit() do { } while (0)
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer) do { } while (0)
@@ -150,7 +150,7 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags
*/
if (irq_settings_can_thread(desc) &&
!(action->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)))
trace_hardirq_threaded();
lockdep_hardirq_threaded();
trace_irq_handler_entry(irq, action);
res = action->handler(irq, action->dev_id);
@@ -3350,10 +3350,10 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
curr->comm, task_pid_nr(curr),
trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
trace_hardirqs_enabled(curr),
trace_softirqs_enabled(curr));
lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
lockdep_hardirqs_enabled(curr),
lockdep_softirqs_enabled(curr));
print_lock(this);
pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
@@ -3737,7 +3737,7 @@ NOKPROBE_SYMBOL(lockdep_hardirqs_off);
/*
* Softirqs will be enabled:
*/
void trace_softirqs_on(unsigned long ip)
void lockdep_softirqs_on(unsigned long ip)
{
struct task_struct *curr = current;
@@ -3777,7 +3777,7 @@ void trace_softirqs_on(unsigned long ip)
/*
* Softirqs were disabled:
*/
void trace_softirqs_off(unsigned long ip)
void lockdep_softirqs_off(unsigned long ip)
{
struct task_struct *curr = current;
@@ -126,7 +126,7 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
* Were softirqs turned off above:
*/
if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_off(ip);
lockdep_softirqs_off(ip);
raw_local_irq_restore(flags);
if (preempt_count() == cnt) {
@@ -147,7 +147,7 @@ static void __local_bh_enable(unsigned int cnt)
trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_on(_RET_IP_);
lockdep_softirqs_on(_RET_IP_);
__preempt_count_sub(cnt);
}
@@ -174,7 +174,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
* Are softirqs going to be turned on now:
*/
if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
trace_softirqs_on(ip);
lockdep_softirqs_on(ip);
/*
* Keep preemption disabled until we are done with
* softirq processing:
@@ -224,9 +224,9 @@ static inline bool lockdep_softirq_start(void)
{
bool in_hardirq = false;
if (trace_hardirq_context(current)) {
if (lockdep_hardirq_context(current)) {
in_hardirq = true;
trace_hardirq_exit();
lockdep_hardirq_exit();
}
lockdep_softirq_enter();
@@ -239,7 +239,7 @@ static inline void lockdep_softirq_end(bool in_hardirq)
lockdep_softirq_exit();
if (in_hardirq)
trace_hardirq_enter();
lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
@@ -414,7 +414,8 @@ void irq_exit(void)
tick_irq_exit();
rcu_irq_exit();
trace_hardirq_exit(); /* must be last! */
/* must be last! */
lockdep_hardirq_exit();
}
/*
@@ -2,12 +2,12 @@
#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
# define trace_hardirq_context(p) 0
# define trace_softirq_context(p) 0
# define trace_hardirqs_enabled(p) 0
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
# define lockdep_hardirq_context(p) 0
# define lockdep_softirq_context(p) 0
# define lockdep_hardirqs_enabled(p) 0
# define lockdep_softirqs_enabled(p) 0
# define lockdep_hardirq_enter() do { } while (0)
# define lockdep_hardirq_exit() do { } while (0)
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
# define INIT_TRACE_IRQFLAGS