Commit 8530684f authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "The big fix here is to our vDSO sigreturn trampoline as, after a
  painfully long stint of debugging, it turned out that fixing some of
  our CFI directives in the merge window lit up a bunch of logic in
  libgcc which has been shown to SEGV in some cases during asynchronous
  pthread cancellation.

  It looks like we can fix this by extending the directives to restore
  most of the interrupted register state from the sigcontext, but it's
  risky and hard to test so we opted to remove the CFI directives for
  now and rely on the unwinder fallback path like we used to.

   - Fix unwinding through vDSO sigreturn trampoline

   - Fix build warnings by raising minimum LD version for PAC

   - Whitelist some Kryo Cortex-A55 derivatives for Meltdown and SSB

   - Fix perf register PC reporting for compat tasks

   - Fix 'make clean' warning for arm64 signal selftests

   - Fix ftrace when BTI is compiled in

   - Avoid building the compat vDSO using GCC plugins"
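
As context for the first fix: with asynchronous cancellation enabled, glibc delivers an internal signal (SIGCANCEL) and libgcc then force-unwinds the target thread out of the signal handler, straight through the sigreturn trampoline. A minimal user-space sketch of that scenario (illustrative only, not part of this series; assumes a glibc/libgcc toolchain):

/* Illustrative repro sketch: pthread_cancel() on an async-cancellable
 * thread triggers an unwind through the vDSO sigreturn frame, the path
 * that the faulty CFI directives broke. Build with: cc -O2 -pthread */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *spin(void *arg)
{
	(void)arg;
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	for (;;)
		;	/* no cancellation point needed in async mode */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, spin, NULL);
	sleep(1);		/* let the thread reach the loop */
	pthread_cancel(t);	/* unwinds through the signal frame */
	pthread_join(t, NULL);
	puts("cancelled cleanly");
	return 0;
}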

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Add KRYO{3,4}XX silver CPU cores to SSB safelist
  arm64: perf: Report the PC value in REGS_ABI_32 mode
  kselftest: arm64: Remove redundant clean target
  arm64: kpti: Add KRYO{3, 4}XX silver CPU cores to kpti safelist
  arm64: Don't insert a BTI instruction at inner labels
  arm64: vdso: Don't use gcc plugins for building vgettimeofday.c
  arm64: vdso: Only pass --no-eh-frame-hdr when linker supports it
  arm64: Depend on newer binutils when building PAC
  arm64: compat: Remove 32-bit sigreturn code from the vDSO
  arm64: compat: Always use sigpage for sigreturn trampoline
  arm64: compat: Allow 32-bit vdso and sigpage to co-exist
  arm64: vdso: Disable dwarf unwinding through the sigreturn trampoline
parents 1590a2e1 108447fd
@@ -1518,9 +1518,9 @@ config ARM64_PTR_AUTH
default y
depends on !KVM || ARM64_VHE
depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
# GCC 9.1 and later inserts a .note.gnu.property section note for PAC
# Modern compilers insert a .note.gnu.property section note for PAC
# which is only understood by binutils starting with version 2.33.1.
depends on !CC_IS_GCC || GCC_VERSION < 90100 || LD_VERSION >= 233010000
depends on LD_IS_LLD || LD_VERSION >= 233010000 || (CC_IS_GCC && GCC_VERSION < 90100)
depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
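
A note on the magic number in the dependency above: at the time of this commit, scripts/ld-version.sh encoded a binutils version as major * 10^8 + minor * 10^6 + patchlevel * 10^4, so 2.33.1 becomes 233010000. A quick sanity check of that encoding (illustrative C, not kernel code):

#include <assert.h>

/* Version encoding assumed above:
 * major * 100000000 + minor * 1000000 + patchlevel * 10000. */
static int encode_ld_version(int major, int minor, int patch)
{
	return major * 100000000 + minor * 1000000 + patch * 10000;
}

int main(void)
{
	assert(encode_ld_version(2, 33, 1) == 233010000);	/* binutils 2.33.1 */
	assert(encode_ld_version(2, 34, 0) > 233010000);	/* anything newer passes */
	return 0;
}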
@@ -12,7 +12,6 @@
* instead.
*/
#define BTI_C hint 34 ;
#define BTI_J hint 36 ;
/*
* When using in-kernel BTI we need to ensure that PCS-conformant assembly
@@ -43,11 +42,6 @@
SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \
BTI_C
#define SYM_INNER_LABEL(name, linkage) \
.type name SYM_T_NONE ASM_NL \
SYM_ENTRY(name, linkage, SYM_A_NONE) \
BTI_J
#endif
/*
@@ -19,6 +19,9 @@
typedef struct {
atomic64_t id;
#ifdef CONFIG_COMPAT
void *sigpage;
#endif
void *vdso;
unsigned long flags;
} mm_context_t;
@@ -29,9 +29,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
ifneq ($(CONFIG_COMPAT_VDSO), y)
obj-$(CONFIG_COMPAT) += sigreturn32.o
endif
obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o
@@ -460,6 +460,8 @@ static const struct midr_range arm64_ssb_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{},
};
@@ -1290,6 +1290,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{ /* sentinel */ }
};
char const *str = "kpti command line option";
@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
return 0;
/*
* Compat (i.e. 32 bit) mode:
* - PC has been set in the pt_regs struct in kernel_entry,
* - Handle SP and LR here.
* Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
we're stuck with it for ABI compatibility reasons.
*
When a 32-bit consumer inspects a 32-bit task, it will look at
* the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
* These correspond directly to a prefix of the registers saved in our
* 'struct pt_regs', with the exception of the PC, so we copy that down
* (x15 corresponds to SP_hyp in the architecture).
*
* So far, so good.
*
* The oddity arises when a 64-bit consumer looks at a 32-bit task and
* asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
* SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
* PC registers would normally live. The initial idea was to allow a
* 64-bit unwinder to unwind a 32-bit task and, although it's not clear
* how well that works in practice, somebody might be relying on it.
*
* At the time we make a sample, we don't know whether the consumer is
* 32-bit or 64-bit, so we have to cater for both possibilities.
*/
if (compat_user_mode(regs)) {
if ((u32)idx == PERF_REG_ARM64_SP)
return regs->compat_sp;
if ((u32)idx == PERF_REG_ARM64_LR)
return regs->compat_lr;
if (idx == 15)
return regs->pc;
}
if ((u32)idx == PERF_REG_ARM64_SP)
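
To make the sampling scheme above concrete, here is a hypothetical consumer-side sketch (illustrative, not kernel code) of how a 64-bit perf user might read SP, LR and PC from a sample taken on a compat task. The PERF_REG_ARM64_* indices mirror the arm64 uapi header; the helper itself is made up for the example:

#include <stdint.h>

#define PERF_REG_ARM64_LR	30	/* as in arch/arm64/include/uapi/asm/perf_regs.h */
#define PERF_REG_ARM64_SP	31
#define PERF_REG_ARM64_PC	32

/* regs[] is the sampled register array, indexed by PERF_REG_ARM64_*.
 * For a compat task the kernel reports SP_usr and LR_usr in the AArch64
 * SP/LR slots, so a 64-bit consumer can read all three as usual. A
 * 32-bit consumer would instead read PC from index 15 (r15). */
static void read_compat_sample(const uint64_t *regs,
			       uint64_t *sp, uint64_t *lr, uint64_t *pc)
{
	*sp = regs[PERF_REG_ARM64_SP];
	*lr = regs[PERF_REG_ARM64_LR];
	*pc = regs[PERF_REG_ARM64_PC];
}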
@@ -342,38 +342,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
retcode = ptr_to_compat(ka->sa.sa_restorer);
} else {
/* Set up sigreturn pointer */
#ifdef CONFIG_COMPAT_VDSO
void *vdso_base = current->mm->context.vdso;
void *vdso_trampoline;
if (ka->sa.sa_flags & SA_SIGINFO) {
if (thumb) {
vdso_trampoline = VDSO_SYMBOL(vdso_base,
compat_rt_sigreturn_thumb);
} else {
vdso_trampoline = VDSO_SYMBOL(vdso_base,
compat_rt_sigreturn_arm);
}
} else {
if (thumb) {
vdso_trampoline = VDSO_SYMBOL(vdso_base,
compat_sigreturn_thumb);
} else {
vdso_trampoline = VDSO_SYMBOL(vdso_base,
compat_sigreturn_arm);
}
}
retcode = ptr_to_compat(vdso_trampoline) + thumb;
#else
unsigned int idx = thumb << 1;
if (ka->sa.sa_flags & SA_SIGINFO)
idx += 3;
retcode = (unsigned long)current->mm->context.vdso +
retcode = (unsigned long)current->mm->context.sigpage +
(idx << 2) + thumb;
#endif
}
regs->regs[0] = usig;
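
The index arithmetic above is terse, so here is what it expands to: each trampoline slot in the sigpage is addressed in 4-byte units via (idx << 2), and adding thumb sets the Thumb bit of the returned address. A standalone sketch mirroring the kernel logic, with the resulting offsets worked out:

#include <stdbool.h>

/* Mirrors the computation in compat_setup_return() above. Offsets:
 *   ARM   sigreturn:     idx = 0          -> sigpage + 0
 *   Thumb sigreturn:     idx = 2          -> sigpage + 8  + 1 (Thumb bit)
 *   ARM   rt_sigreturn:  idx = 3          -> sigpage + 12
 *   Thumb rt_sigreturn:  idx = 2 + 3 = 5  -> sigpage + 20 + 1 (Thumb bit)
 */
static unsigned long sigreturn_trampoline(unsigned long sigpage,
					  bool thumb, bool rt)
{
	unsigned int idx = thumb << 1;

	if (rt)
		idx += 3;

	return sigpage + (idx << 2) + thumb;
}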
@@ -191,15 +191,12 @@ enum aarch32_map {
#ifdef CONFIG_COMPAT_VDSO
AA32_MAP_VVAR,
AA32_MAP_VDSO,
#else
AA32_MAP_SIGPAGE
#endif
AA32_MAP_SIGPAGE
};
static struct page *aarch32_vectors_page __ro_after_init;
#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init;
#endif
static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_VECTORS] = {
@@ -214,12 +211,11 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
.name = "[vdso]",
.mremap = aarch32_vdso_mremap,
},
#else
#endif /* CONFIG_COMPAT_VDSO */
[AA32_MAP_SIGPAGE] = {
.name = "[sigpage]", /* ABI */
.pages = &aarch32_sig_page,
},
#endif /* CONFIG_COMPAT_VDSO */
};
static int aarch32_alloc_kuser_vdso_page(void)
@@ -242,27 +238,11 @@ static int aarch32_alloc_kuser_vdso_page(void)
return 0;
}
#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
int ret;
vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
ret = __vdso_init(VDSO_ABI_AA32);
if (ret)
return ret;
return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
static int aarch32_alloc_sigpage(void)
{
extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long sigpage;
int ret;
sigpage = get_zeroed_page(GFP_ATOMIC);
if (!sigpage)
@@ -271,18 +251,34 @@ static int __aarch32_alloc_vdso_pages(void)
memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
aarch32_sig_page = virt_to_page(sigpage);
flush_dcache_page(aarch32_sig_page);
return 0;
}
ret = aarch32_alloc_kuser_vdso_page();
if (ret)
free_page(sigpage);
#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
return ret;
return __vdso_init(VDSO_ABI_AA32);
}
#endif /* CONFIG_COMPAT_VDSO */
static int __init aarch32_alloc_vdso_pages(void)
{
return __aarch32_alloc_vdso_pages();
int ret;
#ifdef CONFIG_COMPAT_VDSO
ret = __aarch32_alloc_vdso_pages();
if (ret)
return ret;
#endif
ret = aarch32_alloc_sigpage();
if (ret)
return ret;
return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);
@@ -305,7 +301,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
return PTR_ERR_OR_ZERO(ret);
}
#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
unsigned long addr;
@@ -328,12 +323,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
if (IS_ERR(ret))
goto out;
mm->context.vdso = (void *)addr;
mm->context.sigpage = (void *)addr;
out:
return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
@@ -352,10 +346,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm,
bprm,
uses_interp);
#else
ret = aarch32_sigreturn_setup(mm);
if (ret)
goto out;
#endif /* CONFIG_COMPAT_VDSO */
ret = aarch32_sigreturn_setup(mm);
out:
mmap_write_unlock(mm);
return ret;
@@ -23,13 +23,14 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
# potential future proofing if we end up with internal calls to the exported
# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
# preparation in build-time C")).
ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
-Bsymbolic --eh-frame-hdr --build-id -n $(btildflags-y) -T
ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
-Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id -n \
$(btildflags-y) -T
ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
ccflags-y += -DDISABLE_BRANCH_PROFILING
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS)
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS)
KBUILD_CFLAGS += $(DISABLE_LTO)
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
@@ -18,29 +18,40 @@
.text
/*
* NOTE!!! You may notice that all of the .cfi directives in this file have
* been commented out. This is because they have been shown to trigger segfaults
* in libgcc when unwinding out of a SIGCANCEL handler to invoke pthread
* cleanup handlers during the thread cancellation dance. By omitting the
* directives, we trigger an arm64-specific fallback path in the unwinder which
* recognises the signal frame and restores many of the registers directly from
* the sigcontext. Re-enabling the cfi directives here therefore needs to be
* much more comprehensive to reduce the risk of further regressions.
*/
/* Ensure that the mysterious NOP can be associated with a function. */
.cfi_startproc
// .cfi_startproc
/*
* .cfi_signal_frame causes the corresponding Frame Description Entry in the
* .eh_frame section to be annotated as a signal frame. This allows DWARF
* unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo(), which permits
* unwinding out of the signal trampoline without the need for the mysterious
* NOP.
* .cfi_signal_frame causes the corresponding Frame Description Entry (FDE) in
* the .eh_frame section to be annotated as a signal frame. This allows DWARF
* unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo() and identify
* the next frame using the unmodified return address instead of subtracting 1,
* which may yield the wrong FDE.
*/
.cfi_signal_frame
// .cfi_signal_frame
/*
* Tell the unwinder where to locate the frame record linking back to the
* interrupted context. We don't provide unwind info for registers other
* than the frame pointer and the link register here; in practice, this
* is sufficient for unwinding in C/C++ based runtimes and the values in
* the sigcontext may have been modified by this point anyway. Debuggers
* interrupted context. We don't provide unwind info for registers other than
* the frame pointer and the link register here; in practice, this is likely to
* be insufficient for unwinding in C/C++ based runtimes, especially without a
* means to restore the stack pointer. Thankfully, unwinders and debuggers
* already have baked-in strategies for attempting to unwind out of signals.
*/
.cfi_def_cfa x29, 0
.cfi_offset x29, 0 * 8
.cfi_offset x30, 1 * 8
// .cfi_def_cfa x29, 0
// .cfi_offset x29, 0 * 8
// .cfi_offset x30, 1 * 8
/*
* This mysterious NOP is required for some unwinders (e.g. libc++) that
@@ -51,16 +62,19 @@
nop // Mysterious NOP
/*
* GDB relies on being able to identify the sigreturn instruction sequence to
* unwind from signal handlers. We cannot, therefore, use SYM_FUNC_START()
* here, as it will emit a BTI C instruction and break the unwinder. Thankfully,
* this function is only ever called from a RET and so omitting the landing pad
* is perfectly fine.
* GDB, libgcc and libunwind rely on being able to identify the sigreturn
* instruction sequence to unwind from signal handlers. We cannot, therefore,
* use SYM_FUNC_START() here, as it will emit a BTI C instruction and break the
* unwinder. Thankfully, this function is only ever called from a RET and so
* omitting the landing pad is perfectly fine.
*/
SYM_CODE_START(__kernel_rt_sigreturn)
// PLEASE DO NOT MODIFY
mov x8, #__NR_rt_sigreturn
// PLEASE DO NOT MODIFY
svc #0
.cfi_endproc
// PLEASE DO NOT MODIFY
// .cfi_endproc
SYM_CODE_END(__kernel_rt_sigreturn)
emit_aarch64_feature_1_and
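
The sequence above is marked "PLEASE DO NOT MODIFY" because unwinders pattern-match the exact instructions. Roughly, libgcc's arm64 fallback path boils down to the check sketched below (illustrative C, not the actual libgcc source; the two opcodes are the well-known encodings of the trampoline instructions):

#include <stdbool.h>
#include <stdint.h>

/* Match the two words at the return address against the sigreturn
 * trampoline: "movz x8, #__NR_rt_sigreturn" (139 on arm64) and "svc #0". */
static bool pc_is_sigreturn_trampoline(const uint32_t *pc)
{
	return pc[0] == 0xd2801168 &&	/* movz x8, #139 */
	       pc[1] == 0xd4000001;	/* svc  #0 */
}

If the match succeeds, the unwinder restores the interrupted registers directly from the sigcontext instead of trusting CFI, which is the fallback path the commit message refers to.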
@@ -140,7 +140,6 @@ hostprogs := $(munge)
c-obj-vdso := note.o
c-obj-vdso-gettimeofday := vgettimeofday.o
asm-obj-vdso := sigreturn.o
ifneq ($(c-gettimeofday-y),)
VDSO_CFLAGS_gettimeofday_o += -include $(c-gettimeofday-y)
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file provides both A32 and T32 versions, in accordance with the
* arm sigreturn code.
*
* Please read the comments in arch/arm64/kernel/vdso/sigreturn.S to
* understand some of the craziness in here.
*
* Copyright (C) 2018 ARM Limited
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
.text
.arm
.fnstart
.save {r0-r15}
.pad #COMPAT_SIGFRAME_REGS_OFFSET
nop
SYM_CODE_START(__kernel_sigreturn_arm)
mov r7, #__NR_compat_sigreturn
svc #0
.fnend
SYM_CODE_END(__kernel_sigreturn_arm)
.fnstart
.save {r0-r15}
.pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
nop
SYM_CODE_START(__kernel_rt_sigreturn_arm)
mov r7, #__NR_compat_rt_sigreturn
svc #0
.fnend
SYM_CODE_END(__kernel_rt_sigreturn_arm)
.thumb
.fnstart
.save {r0-r15}
.pad #COMPAT_SIGFRAME_REGS_OFFSET
nop
SYM_CODE_START(__kernel_sigreturn_thumb)
mov r7, #__NR_compat_sigreturn
svc #0
.fnend
SYM_CODE_END(__kernel_sigreturn_thumb)
.fnstart
.save {r0-r15}
.pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
nop
SYM_CODE_START(__kernel_rt_sigreturn_thumb)
mov r7, #__NR_compat_rt_sigreturn
svc #0
.fnend
SYM_CODE_END(__kernel_rt_sigreturn_thumb)
@@ -64,19 +64,7 @@ VERSION
__vdso_clock_gettime;
__vdso_gettimeofday;
__vdso_clock_getres;
__kernel_sigreturn_arm;
__kernel_sigreturn_thumb;
__kernel_rt_sigreturn_arm;
__kernel_rt_sigreturn_thumb;
__vdso_clock_gettime64;
local: *;
};
}
/*
* Make the sigreturn code visible to the kernel.
*/
VDSO_compat_sigreturn_arm = __kernel_sigreturn_arm;
VDSO_compat_sigreturn_thumb = __kernel_sigreturn_thumb;
VDSO_compat_rt_sigreturn_arm = __kernel_rt_sigreturn_arm;
VDSO_compat_rt_sigreturn_thumb = __kernel_rt_sigreturn_thumb;
@@ -21,10 +21,6 @@ include ../../lib.mk
$(TEST_GEN_PROGS): $(PROGS)
cp $(PROGS) $(OUTPUT)/
clean:
$(CLEAN)
rm -f $(PROGS)
# Common test-unit targets to build common-layout test-cases executables
# Needs secondary expansion to properly include the testcase c-file in pre-reqs
.SECONDEXPANSION: