Commit e5159827 authored by Will Deacon

Merge branches 'for-next/asm' and 'for-next/insn' into for-next/bti

Merge in dependencies for in-kernel Branch Target Identification support.

* for-next/asm:
  arm64: Disable old style assembly annotations
  arm64: kernel: Convert to modern annotations for assembly functions
  arm64: entry: Refactor and modernise annotation for ret_to_user
  x86/asm: Provide a Kconfig symbol for disabling old assembly annotations
  x86/32: Remove CONFIG_DOUBLEFAULT
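
The asm-side conversions below all follow one mechanical pattern:
ENTRY/ENDPROC pairs become SYM_FUNC_START/SYM_FUNC_END for ordinary
C-callable functions, and SYM_CODE_START/SYM_CODE_END for code with a
non-standard calling convention (exception vectors, trampolines, idmap
code). As a minimal sketch of what ARCH_USE_SYM_ANNOTATIONS switches off
(the compat fallbacks appear in the linkage.h hunk near the end of this
diff):

    /* Only defined while the arch has NOT selected
     * ARCH_USE_SYM_ANNOTATIONS. */
    #ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
    #define ENTRY(name)     SYM_FUNC_START(name)
    #define ENDPROC(name)   SYM_FUNC_END(name)
    #endif

Once an architecture selects the symbol, any leftover ENTRY()/ENDPROC()
in its assembly fails the build instead of silently expanding.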

* for-next/insn:
  arm64: insn: Report PAC and BTI instructions as skippable
  arm64: insn: Don't assume unrecognized HINTs are skippable
  arm64: insn: Provide a better name for aarch64_insn_is_nop()
  arm64: insn: Add constants for new HINT instruction decode
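
For the insn branch, the relevant detail is that a HINT instruction keeps
its CRm:op2 immediate in bits [11:5] of the encoding, which is why every
new enum value below is shifted left by 5 and why
aarch64_insn_is_steppable_hint() switches on "insn & 0xFE0". A minimal
sketch in C, assuming the HINT base encoding 0xD503201F (HINT #0, i.e.
NOP); the helper name is illustrative, the kernel equivalent is
aarch64_insn_gen_hint():

    #include <stdint.h>

    #define HINT_BASE 0xD503201FU           /* HINT #0 == NOP */

    /* op is one of the enum values below, already shifted by 5 */
    static uint32_t gen_hint(uint32_t op)
    {
            return HINT_BASE | op;
    }

    /* gen_hint(0x22 << 5) == 0xD503245F, the "BTI c" encoding;
     * (insn & 0xFE0) recovers the 0x22 << 5 immediate again. */
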
@@ -66,6 +66,7 @@ config ARM64
     select ARCH_USE_GNU_PROPERTY
     select ARCH_USE_QUEUED_RWLOCKS
     select ARCH_USE_QUEUED_SPINLOCKS
+    select ARCH_USE_SYM_ANNOTATIONS
     select ARCH_SUPPORTS_MEMORY_FAILURE
     select ARCH_SUPPORTS_ATOMIC_RMW
     select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
......
@@ -39,13 +39,37 @@ enum aarch64_insn_encoding_class {
                                          * system instructions */
 };
 
-enum aarch64_insn_hint_op {
+enum aarch64_insn_hint_cr_op {
     AARCH64_INSN_HINT_NOP   = 0x0 << 5,
     AARCH64_INSN_HINT_YIELD = 0x1 << 5,
     AARCH64_INSN_HINT_WFE   = 0x2 << 5,
     AARCH64_INSN_HINT_WFI   = 0x3 << 5,
     AARCH64_INSN_HINT_SEV   = 0x4 << 5,
     AARCH64_INSN_HINT_SEVL  = 0x5 << 5,
+
+    AARCH64_INSN_HINT_XPACLRI    = 0x07 << 5,
+    AARCH64_INSN_HINT_PACIA_1716 = 0x08 << 5,
+    AARCH64_INSN_HINT_PACIB_1716 = 0x0A << 5,
+    AARCH64_INSN_HINT_AUTIA_1716 = 0x0C << 5,
+    AARCH64_INSN_HINT_AUTIB_1716 = 0x0E << 5,
+    AARCH64_INSN_HINT_PACIAZ     = 0x18 << 5,
+    AARCH64_INSN_HINT_PACIASP    = 0x19 << 5,
+    AARCH64_INSN_HINT_PACIBZ     = 0x1A << 5,
+    AARCH64_INSN_HINT_PACIBSP    = 0x1B << 5,
+    AARCH64_INSN_HINT_AUTIAZ     = 0x1C << 5,
+    AARCH64_INSN_HINT_AUTIASP    = 0x1D << 5,
+    AARCH64_INSN_HINT_AUTIBZ     = 0x1E << 5,
+    AARCH64_INSN_HINT_AUTIBSP    = 0x1F << 5,
+    AARCH64_INSN_HINT_ESB        = 0x10 << 5,
+    AARCH64_INSN_HINT_PSB        = 0x11 << 5,
+    AARCH64_INSN_HINT_TSB        = 0x12 << 5,
+    AARCH64_INSN_HINT_CSDB       = 0x14 << 5,
+
+    AARCH64_INSN_HINT_BTI   = 0x20 << 5,
+    AARCH64_INSN_HINT_BTIC  = 0x22 << 5,
+    AARCH64_INSN_HINT_BTIJ  = 0x24 << 5,
+    AARCH64_INSN_HINT_BTIJC = 0x26 << 5,
 };
 
 enum aarch64_insn_imm_type {
@@ -344,7 +368,7 @@ __AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
 
 #undef __AARCH64_INSN_FUNCS
 
-bool aarch64_insn_is_nop(u32 insn);
+bool aarch64_insn_is_steppable_hint(u32 insn);
 bool aarch64_insn_is_branch_imm(u32 insn);
 
 static inline bool aarch64_insn_is_adr_adrp(u32 insn)
@@ -370,7 +394,7 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
                                      enum aarch64_insn_branch_type type);
 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
                                      enum aarch64_insn_condition cond);
-u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
+u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op);
 u32 aarch64_insn_gen_nop(void);
 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
                                 enum aarch64_insn_branch_type type);
......
@@ -29,7 +29,7 @@
  * branch to what would be the reset vector. It must be executed with the
  * flat identity mapping.
  */
-ENTRY(__cpu_soft_restart)
+SYM_CODE_START(__cpu_soft_restart)
     /* Clear sctlr_el1 flags. */
     mrs x12, sctlr_el1
     mov_q x13, SCTLR_ELx_FLAGS
@@ -47,6 +47,6 @@ ENTRY(__cpu_soft_restart)
     mov x1, x3          // arg1
     mov x2, x4          // arg2
     br x8
-ENDPROC(__cpu_soft_restart)
+SYM_CODE_END(__cpu_soft_restart)
 
 .popsection
@@ -5,7 +5,7 @@
 
 #include <linux/linkage.h>
 
-ENTRY(__efi_rt_asm_wrapper)
+SYM_FUNC_START(__efi_rt_asm_wrapper)
     stp x29, x30, [sp, #-32]!
     mov x29, sp
@@ -35,4 +35,4 @@ ENTRY(__efi_rt_asm_wrapper)
     b.ne 0f
     ret
 0:  b efi_handle_corrupted_x18  // tail call
-ENDPROC(__efi_rt_asm_wrapper)
+SYM_FUNC_END(__efi_rt_asm_wrapper)
@@ -16,34 +16,34 @@
  *
  * x0 - pointer to struct fpsimd_state
  */
-ENTRY(fpsimd_save_state)
+SYM_FUNC_START(fpsimd_save_state)
     fpsimd_save x0, 8
     ret
-ENDPROC(fpsimd_save_state)
+SYM_FUNC_END(fpsimd_save_state)
 
 /*
  * Load the FP registers.
  *
  * x0 - pointer to struct fpsimd_state
  */
-ENTRY(fpsimd_load_state)
+SYM_FUNC_START(fpsimd_load_state)
     fpsimd_restore x0, 8
     ret
-ENDPROC(fpsimd_load_state)
+SYM_FUNC_END(fpsimd_load_state)
 
 #ifdef CONFIG_ARM64_SVE
-ENTRY(sve_save_state)
+SYM_FUNC_START(sve_save_state)
     sve_save 0, x1, 2
     ret
-ENDPROC(sve_save_state)
+SYM_FUNC_END(sve_save_state)
 
-ENTRY(sve_load_state)
+SYM_FUNC_START(sve_load_state)
     sve_load 0, x1, x2, 3, x4
     ret
-ENDPROC(sve_load_state)
+SYM_FUNC_END(sve_load_state)
 
-ENTRY(sve_get_vl)
+SYM_FUNC_START(sve_get_vl)
     _sve_rdvl 0, 1
     ret
-ENDPROC(sve_get_vl)
+SYM_FUNC_END(sve_get_vl)
 #endif /* CONFIG_ARM64_SVE */
@@ -727,21 +727,10 @@ el0_error_naked:
     b ret_to_user
 SYM_CODE_END(el0_error)
 
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
-work_pending:
-    mov x0, sp                          // 'regs'
-    bl do_notify_resume
-#ifdef CONFIG_TRACE_IRQFLAGS
-    bl trace_hardirqs_on                // enabled while in userspace
-#endif
-    ldr x1, [tsk, #TSK_TI_FLAGS]        // re-check for single-step
-    b finish_ret_to_user
 /*
  * "slow" syscall return path.
  */
-ret_to_user:
+SYM_CODE_START_LOCAL(ret_to_user)
     disable_daif
     gic_prio_kentry_setup tmp=x3
     ldr x1, [tsk, #TSK_TI_FLAGS]
@@ -753,7 +742,19 @@ finish_ret_to_user:
     bl stackleak_erase
 #endif
     kernel_exit 0
-ENDPROC(ret_to_user)
+
+/*
+ * Ok, we need to do extra processing, enter the slow path.
+ */
+work_pending:
+    mov x0, sp                          // 'regs'
+    bl do_notify_resume
+#ifdef CONFIG_TRACE_IRQFLAGS
+    bl trace_hardirqs_on                // enabled while in userspace
+#endif
+    ldr x1, [tsk, #TSK_TI_FLAGS]        // re-check for single-step
+    b finish_ret_to_user
+SYM_CODE_END(ret_to_user)
 
 .popsection             // .entry.text
......
@@ -65,7 +65,7 @@
  * x5: physical address of a zero page that remains zero after resume
  */
 .pushsection ".hibernate_exit.text", "ax"
-ENTRY(swsusp_arch_suspend_exit)
+SYM_CODE_START(swsusp_arch_suspend_exit)
     /*
      * We execute from ttbr0, change ttbr1 to our copied linear map tables
      * with a break-before-make via the zero page
@@ -110,7 +110,7 @@ ENTRY(swsusp_arch_suspend_exit)
     cbz x24, 3f     /* Do we need to re-initialise EL2? */
     hvc #0
 3:  ret
-ENDPROC(swsusp_arch_suspend_exit)
+SYM_CODE_END(swsusp_arch_suspend_exit)
 
 /*
  * Restore the hyp stub.
@@ -119,15 +119,15 @@ ENDPROC(swsusp_arch_suspend_exit)
  *
  * x24: The physical address of __hyp_stub_vectors
  */
-el1_sync:
+SYM_CODE_START_LOCAL(el1_sync)
     msr vbar_el2, x24
     eret
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
 
 .macro invalid_vector label
-\label:
+SYM_CODE_START_LOCAL(\label)
     b \label
-ENDPROC(\label)
+SYM_CODE_END(\label)
 .endm
 
     invalid_vector el2_sync_invalid
@@ -141,7 +141,7 @@ ENDPROC(\label)
 
 /* el2 vectors - switch el2 here while we restore the memory image. */
     .align 11
-ENTRY(hibernate_el2_vectors)
+SYM_CODE_START(hibernate_el2_vectors)
     ventry el2_sync_invalid         // Synchronous EL2t
     ventry el2_irq_invalid          // IRQ EL2t
     ventry el2_fiq_invalid          // FIQ EL2t
@@ -161,6 +161,6 @@ ENTRY(hibernate_el2_vectors)
     ventry el1_irq_invalid          // IRQ 32-bit EL1
     ventry el1_fiq_invalid          // FIQ 32-bit EL1
     ventry el1_error_invalid        // Error 32-bit EL1
-END(hibernate_el2_vectors)
+SYM_CODE_END(hibernate_el2_vectors)
 
 .popsection
@@ -21,7 +21,7 @@
 
     .align 11
 
-ENTRY(__hyp_stub_vectors)
+SYM_CODE_START(__hyp_stub_vectors)
     ventry el2_sync_invalid         // Synchronous EL2t
     ventry el2_irq_invalid          // IRQ EL2t
     ventry el2_fiq_invalid          // FIQ EL2t
@@ -41,11 +41,11 @@ ENTRY(__hyp_stub_vectors)
     ventry el1_irq_invalid          // IRQ 32-bit EL1
     ventry el1_fiq_invalid          // FIQ 32-bit EL1
     ventry el1_error_invalid        // Error 32-bit EL1
-ENDPROC(__hyp_stub_vectors)
+SYM_CODE_END(__hyp_stub_vectors)
 
     .align 11
 
-el1_sync:
+SYM_CODE_START_LOCAL(el1_sync)
     cmp x0, #HVC_SET_VECTORS
     b.ne 2f
     msr vbar_el2, x1
@@ -68,12 +68,12 @@ el1_sync:
 
 9:  mov x0, xzr
     eret
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
 
 .macro invalid_vector label
-\label:
+SYM_CODE_START_LOCAL(\label)
     b \label
-ENDPROC(\label)
+SYM_CODE_END(\label)
 .endm
 
     invalid_vector el2_sync_invalid
@@ -106,15 +106,15 @@ ENDPROC(\label)
  * initialisation entry point.
  */
 
-ENTRY(__hyp_set_vectors)
+SYM_FUNC_START(__hyp_set_vectors)
     mov x1, x0
     mov x0, #HVC_SET_VECTORS
     hvc #0
     ret
-ENDPROC(__hyp_set_vectors)
+SYM_FUNC_END(__hyp_set_vectors)
 
-ENTRY(__hyp_reset_vectors)
+SYM_FUNC_START(__hyp_reset_vectors)
     mov x0, #HVC_RESET_VECTORS
     hvc #0
     ret
-ENDPROC(__hyp_reset_vectors)
+SYM_FUNC_END(__hyp_reset_vectors)
@@ -51,21 +51,33 @@ enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
     return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
 }
 
-/* NOP is an alias of HINT */
-bool __kprobes aarch64_insn_is_nop(u32 insn)
+bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
 {
     if (!aarch64_insn_is_hint(insn))
         return false;
 
     switch (insn & 0xFE0) {
-    case AARCH64_INSN_HINT_YIELD:
-    case AARCH64_INSN_HINT_WFE:
-    case AARCH64_INSN_HINT_WFI:
-    case AARCH64_INSN_HINT_SEV:
-    case AARCH64_INSN_HINT_SEVL:
-        return false;
-    default:
+    case AARCH64_INSN_HINT_XPACLRI:
+    case AARCH64_INSN_HINT_PACIA_1716:
+    case AARCH64_INSN_HINT_PACIB_1716:
+    case AARCH64_INSN_HINT_AUTIA_1716:
+    case AARCH64_INSN_HINT_AUTIB_1716:
+    case AARCH64_INSN_HINT_PACIAZ:
+    case AARCH64_INSN_HINT_PACIASP:
+    case AARCH64_INSN_HINT_PACIBZ:
+    case AARCH64_INSN_HINT_PACIBSP:
+    case AARCH64_INSN_HINT_AUTIAZ:
+    case AARCH64_INSN_HINT_AUTIASP:
+    case AARCH64_INSN_HINT_AUTIBZ:
+    case AARCH64_INSN_HINT_AUTIBSP:
+    case AARCH64_INSN_HINT_BTI:
+    case AARCH64_INSN_HINT_BTIC:
+    case AARCH64_INSN_HINT_BTIJ:
+    case AARCH64_INSN_HINT_BTIJC:
+    case AARCH64_INSN_HINT_NOP:
         return true;
+    default:
+        return false;
     }
 }
@@ -574,7 +586,7 @@ u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
                                      offset >> 2);
 }
 
-u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
+u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
 {
     return aarch64_insn_get_hint_value() | op;
 }
......
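
A quick usage sketch tying the renamed pieces together (illustrative, not
part of the patch; the expected constants assume the HINT base encoding
0xD503201F mentioned above):

    #include <asm/insn.h>

    void hint_examples(void)
    {
            u32 nop   = aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);  /* 0xD503201F */
            u32 bti_c = aarch64_insn_gen_hint(AARCH64_INSN_HINT_BTIC); /* 0xD503245F */

            aarch64_insn_is_steppable_hint(nop);        /* true: on the allow-list */
            aarch64_insn_is_steppable_hint(bti_c);      /* true: on the allow-list */
            aarch64_insn_is_steppable_hint(0xD503205F); /* WFE hint: false */
    }

The probes hunk just below is the consumer: a HINT may only be
single-stepped out of line if this predicate accepts it.
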
@@ -46,7 +46,7 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
      * except for the NOP case.
      */
     if (aarch64_insn_is_hint(insn))
-        return aarch64_insn_is_nop(insn);
+        return aarch64_insn_is_steppable_hint(insn);
 
     return true;
 }
......
@@ -61,7 +61,7 @@
     ldp x28, x29, [sp, #S_X28]
     .endm
 
-ENTRY(kretprobe_trampoline)
+SYM_CODE_START(kretprobe_trampoline)
     sub sp, sp, #S_FRAME_SIZE
     save_all_base_regs
@@ -79,4 +79,4 @@ ENTRY(kretprobe_trampoline)
     add sp, sp, #S_FRAME_SIZE
     ret
 
-ENDPROC(kretprobe_trampoline)
+SYM_CODE_END(kretprobe_trampoline)
@@ -5,81 +5,81 @@
 
 #include <linux/linkage.h>
 
-ENTRY(absolute_data64)
+SYM_FUNC_START(absolute_data64)
     ldr x0, 0f
     ret
 0:  .quad sym64_abs
-ENDPROC(absolute_data64)
+SYM_FUNC_END(absolute_data64)
 
-ENTRY(absolute_data32)
+SYM_FUNC_START(absolute_data32)
     ldr w0, 0f
     ret
 0:  .long sym32_abs
-ENDPROC(absolute_data32)
+SYM_FUNC_END(absolute_data32)
 
-ENTRY(absolute_data16)
+SYM_FUNC_START(absolute_data16)
     adr x0, 0f
     ldrh w0, [x0]
     ret
 0:  .short sym16_abs, 0
-ENDPROC(absolute_data16)
+SYM_FUNC_END(absolute_data16)
 
-ENTRY(signed_movw)
+SYM_FUNC_START(signed_movw)
     movz x0, #:abs_g2_s:sym64_abs
     movk x0, #:abs_g1_nc:sym64_abs
     movk x0, #:abs_g0_nc:sym64_abs
     ret
-ENDPROC(signed_movw)
+SYM_FUNC_END(signed_movw)
 
-ENTRY(unsigned_movw)
+SYM_FUNC_START(unsigned_movw)
     movz x0, #:abs_g3:sym64_abs
     movk x0, #:abs_g2_nc:sym64_abs
     movk x0, #:abs_g1_nc:sym64_abs
     movk x0, #:abs_g0_nc:sym64_abs
     ret
-ENDPROC(unsigned_movw)
+SYM_FUNC_END(unsigned_movw)
 
     .align 12
     .space 0xff8
-ENTRY(relative_adrp)
+SYM_FUNC_START(relative_adrp)
     adrp x0, sym64_rel
     add x0, x0, #:lo12:sym64_rel
     ret
-ENDPROC(relative_adrp)
+SYM_FUNC_END(relative_adrp)
 
     .align 12
     .space 0xffc
-ENTRY(relative_adrp_far)
+SYM_FUNC_START(relative_adrp_far)
     adrp x0, memstart_addr
     add x0, x0, #:lo12:memstart_addr
     ret
-ENDPROC(relative_adrp_far)
+SYM_FUNC_END(relative_adrp_far)
 
-ENTRY(relative_adr)
+SYM_FUNC_START(relative_adr)
     adr x0, sym64_rel
     ret
-ENDPROC(relative_adr)
+SYM_FUNC_END(relative_adr)
 
-ENTRY(relative_data64)
+SYM_FUNC_START(relative_data64)
     adr x1, 0f
     ldr x0, [x1]
     add x0, x0, x1
     ret
 0:  .quad sym64_rel - .
-ENDPROC(relative_data64)
+SYM_FUNC_END(relative_data64)
 
-ENTRY(relative_data32)
+SYM_FUNC_START(relative_data32)
     adr x1, 0f
     ldr w0, [x1]
     add x0, x0, x1
     ret
 0:  .long sym64_rel - .
-ENDPROC(relative_data32)
+SYM_FUNC_END(relative_data32)
 
-ENTRY(relative_data16)
+SYM_FUNC_START(relative_data16)
     adr x1, 0f
     ldrsh w0, [x1]
     add x0, x0, x1
     ret
 0:  .short sym64_rel - ., 0
-ENDPROC(relative_data16)
+SYM_FUNC_END(relative_data16)
@@ -26,7 +26,7 @@
  * control_code_page, a special page which has been set up to be preserved
  * during the copy operation.
  */
-ENTRY(arm64_relocate_new_kernel)
+SYM_CODE_START(arm64_relocate_new_kernel)
 
     /* Setup the list loop variables. */
     mov x18, x2             /* x18 = dtb address */
@@ -111,7 +111,7 @@ ENTRY(arm64_relocate_new_kernel)
     mov x3, xzr
     br x17
 
-ENDPROC(arm64_relocate_new_kernel)
+SYM_CODE_END(arm64_relocate_new_kernel)
 
 .align 3    /* To keep the 64-bit values below naturally aligned. */
......
@@ -62,7 +62,7 @@
  *
  * x0 = struct sleep_stack_data area
  */
-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
     stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
     stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
     stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
@@ -95,10 +95,10 @@ ENTRY(__cpu_suspend_enter)
     ldp x29, lr, [sp], #16
     mov x0, #1
     ret
-ENDPROC(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)
 
     .pushsection ".idmap.text", "awx"
-ENTRY(cpu_resume)
+SYM_CODE_START(cpu_resume)
     bl el2_setup            // if in EL2 drop to EL1 cleanly
     mov x0, #ARM64_CPU_RUNTIME
     bl __cpu_setup
@@ -107,11 +107,11 @@ ENTRY(cpu_resume)
     bl __enable_mmu
     ldr x8, =_cpu_resume
     br x8
-ENDPROC(cpu_resume)
+SYM_CODE_END(cpu_resume)
 
     .ltorg
     .popsection
 
-ENTRY(_cpu_resume)
+SYM_FUNC_START(_cpu_resume)
     mrs x1, mpidr_el1
     adr_l x8, mpidr_hash        // x8 = struct mpidr_hash virt address
@@ -147,4 +147,4 @@ ENTRY(_cpu_resume)
     ldp x29, lr, [x29]
     mov x0, #0
     ret
-ENDPROC(_cpu_resume)
+SYM_FUNC_END(_cpu_resume)
@@ -30,9 +30,9 @@
  * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
  * struct arm_smccc_quirk *quirk)
  */
-ENTRY(__arm_smccc_smc)
+SYM_FUNC_START(__arm_smccc_smc)
     SMCCC smc
-ENDPROC(__arm_smccc_smc)
+SYM_FUNC_END(__arm_smccc_smc)
 EXPORT_SYMBOL(__arm_smccc_smc)
 
 /*
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__arm_smccc_smc)
  * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
  * struct arm_smccc_quirk *quirk)
  */
-ENTRY(__arm_smccc_hvc)
+SYM_FUNC_START(__arm_smccc_hvc)
     SMCCC hvc
-ENDPROC(__arm_smccc_hvc)
+SYM_FUNC_END(__arm_smccc_hvc)
 EXPORT_SYMBOL(__arm_smccc_hvc)
@@ -91,6 +91,7 @@ config X86
     select ARCH_USE_BUILTIN_BSWAP
     select ARCH_USE_QUEUED_RWLOCKS
     select ARCH_USE_QUEUED_SPINLOCKS
+    select ARCH_USE_SYM_ANNOTATIONS
     select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
     select ARCH_WANT_DEFAULT_BPF_JIT if X86_64
     select ARCH_WANTS_DYNAMIC_TASK_STRUCT
......
@@ -99,15 +99,6 @@ config DEBUG_WX
 
       If in doubt, say "Y".
 
-config DOUBLEFAULT
-    default y
-    bool "Enable doublefault exception handler" if EXPERT && X86_32
-    ---help---
-      This option allows trapping of rare doublefault exceptions that
-      would otherwise cause a system to silently reboot. Disabling this
-      option saves about 4k and might cause you much additional grey
-      hair.
-
 config DEBUG_TLBFLUSH
     bool "Set upper limit of TLB entries to flush one-by-one"
     depends on DEBUG_KERNEL
......
@@ -1536,7 +1536,6 @@ SYM_CODE_START(debug)
     jmp common_exception
 SYM_CODE_END(debug)
 
-#ifdef CONFIG_DOUBLEFAULT
 SYM_CODE_START(double_fault)
 1:
     /*
@@ -1576,7 +1575,6 @@ SYM_CODE_START(double_fault)
     hlt
     jmp 1b
 SYM_CODE_END(double_fault)
-#endif
 
 /*
  * NMI is doubly nasty. It can happen on the first instruction of
@@ -2,7 +2,7 @@
 #ifndef _ASM_X86_DOUBLEFAULT_H
 #define _ASM_X86_DOUBLEFAULT_H
 
-#if defined(CONFIG_X86_32) && defined(CONFIG_DOUBLEFAULT)
+#ifdef CONFIG_X86_32
 extern void doublefault_init_cpu_tss(void);
 #else
 static inline void doublefault_init_cpu_tss(void)
......
@@ -69,9 +69,7 @@ dotraplinkage void do_overflow(struct pt_regs *regs, long error_code);
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code);
 dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code);
 dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code);
-#if defined(CONFIG_X86_64) || defined(CONFIG_DOUBLEFAULT)
 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2);
-#endif
 dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code);
 dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
 dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);
......
@@ -102,9 +102,7 @@ obj-$(CONFIG_KEXEC_FILE)	+= kexec-bzimage64.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump_$(BITS).o
 obj-y				+= kprobes/
 obj-$(CONFIG_MODULES)		+= module.o
-ifeq ($(CONFIG_X86_32),y)
-obj-$(CONFIG_DOUBLEFAULT)	+= doublefault_32.o
-endif
+obj-$(CONFIG_X86_32)		+= doublefault_32.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_VM86)		+= vm86_32.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
......
@@ -87,7 +87,6 @@ static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
 
 static bool in_doublefault_stack(unsigned long *stack, struct stack_info *info)
 {
-#ifdef CONFIG_DOUBLEFAULT
     struct cpu_entry_area *cea = get_cpu_entry_area(raw_smp_processor_id());
     struct doublefault_stack *ss = &cea->doublefault_stack;
@@ -103,9 +102,6 @@ static bool in_doublefault_stack(unsigned long *stack, struct stack_info *info)
     info->next_sp = (unsigned long *)this_cpu_read(cpu_tss_rw.x86_tss.sp);
 
     return true;
-#else
-    return false;
-#endif
 }
......
@@ -326,7 +326,6 @@ __visible void __noreturn handle_stack_overflow(const char *message,
 }
 #endif
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_DOUBLEFAULT)
 /*
  * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
  *
@@ -450,7 +449,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2)
     die("double fault", regs, error_code);
     panic("Machine halted.");
 }
-#endif
 
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 {
......
@@ -17,7 +17,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
 #endif
 
-#if defined(CONFIG_X86_32) && defined(CONFIG_DOUBLEFAULT)
+#ifdef CONFIG_X86_32
 DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
 #endif
@@ -114,12 +114,10 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
 #else
 static inline void percpu_setup_exception_stacks(unsigned int cpu)
 {
-#ifdef CONFIG_DOUBLEFAULT
     struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
 
     cea_map_percpu_pages(&cea->doublefault_stack,
                          &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
-#endif
 }
 #endif
......
@@ -105,7 +105,7 @@
 
 /* === DEPRECATED annotations === */
 
-#ifndef CONFIG_X86
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
 #ifndef GLOBAL
 /* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */
 #define GLOBAL(name) \
@@ -118,10 +118,10 @@
 #define ENTRY(name) \
     SYM_FUNC_START(name)
 #endif
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
 #endif /* LINKER_SCRIPT */
 
-#ifndef CONFIG_X86
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
 #ifndef WEAK
 /* deprecated, use SYM_FUNC_START_WEAK* */
 #define WEAK(name) \
@@ -143,7 +143,7 @@
 #define ENDPROC(name) \
     SYM_FUNC_END(name)
 #endif
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
 
 /* === generic annotations === */
......
@@ -80,6 +80,9 @@ config ARCH_USE_CMPXCHG_LOCKREF
 config ARCH_HAS_FAST_MULTIPLIER
     bool
 
+config ARCH_USE_SYM_ANNOTATIONS
+    bool
+
 config INDIRECT_PIO
     bool "Access I/O in non-MMIO mode"
     depends on ARM64
......
@@ -58,7 +58,6 @@ CONFIG_RCU_EQS_DEBUG=y
 CONFIG_USER_STACKTRACE_SUPPORT=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_DOUBLEFAULT=y
 CONFIG_X86_DEBUG_FPU=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_DEBUG_PAGEALLOC=y
......