Commit 23221d99 authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
 "Nothing particularly stands out here, probably because people were
  tied up with spectre/meltdown stuff last time around. Still, the main
  pieces are:

   - Rework of our CPU features framework so that we can whitelist CPUs
     that don't require kpti even in a heterogeneous system

   - Support for the IDC/DIC architecture extensions, which allow us to
     elide instruction and data cache maintenance when writing out
     instructions

   - Removal of the large memory model which resulted in suboptimal
     codegen by the compiler and increased the use of literal pools,
     which could potentially be used as ROP gadgets since they are
     mapped as executable

   - Rework of forced signal delivery so that the siginfo_t is
     well-formed and handling of show_unhandled_signals is consolidated
     and made consistent between different fault types

   - More siginfo cleanup based on the initial patches from Eric
     Biederman

   - Workaround for Cortex-A55 erratum #1024718

   - Some small ACPI IORT updates and cleanups from Lorenzo Pieralisi

   - Misc cleanups and non-critical fixes"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (70 commits)
  arm64: uaccess: Fix omissions from usercopy whitelist
  arm64: fpsimd: Split cpu field out from struct fpsimd_state
  arm64: tlbflush: avoid writing RES0 bits
  arm64: cmpxchg: Include linux/compiler.h in asm/cmpxchg.h
  arm64: move percpu cmpxchg implementation from cmpxchg.h to percpu.h
  arm64: cmpxchg: Include build_bug.h instead of bug.h for BUILD_BUG
  arm64: lse: Include compiler_types.h and export.h for out-of-line LL/SC
  arm64: fpsimd: include <linux/init.h> in fpsimd.h
  drivers/perf: arm_pmu_platform: do not warn about affinity on uniprocessor
  perf: arm_spe: include linux/vmalloc.h for vmap()
  Revert "arm64: Revert L1_CACHE_SHIFT back to 6 (64-byte cache line size)"
  arm64: cpufeature: Avoid warnings due to unused symbols
  arm64: Add work around for Arm Cortex-A55 Erratum 1024718
  arm64: Delay enabling hardware DBM feature
  arm64: Add MIDR encoding for Arm Cortex-A55 and Cortex-A35
  arm64: capabilities: Handle shared entries
  arm64: capabilities: Add support for checks based on a list of MIDRs
  arm64: Add helpers for checking CPU MIDR against a range
  arm64: capabilities: Clean up midr range helpers
  arm64: capabilities: Change scope of VHE to Boot CPU feature
  ...
parents 5b1f3dc9 65896545
@@ -110,7 +110,7 @@ infrastructure:
     x--------------------------------------------------x
     | Name                         |  bits   | visible |
     |--------------------------------------------------|
-    | RES0                         | [63-52] |    n    |
     | TS                           | [55-52] |    y    |
     |--------------------------------------------------|
     | FHM                          | [51-48] |    y    |
     |--------------------------------------------------|
@@ -124,8 +124,6 @@ infrastructure:
     |--------------------------------------------------|
     | RDM                          | [31-28] |    y    |
     |--------------------------------------------------|
-    | RES0                         | [27-24] |    n    |
-    |--------------------------------------------------|
     | ATOMICS                      | [23-20] |    y    |
     |--------------------------------------------------|
     | CRC32                        | [19-16] |    y    |
@@ -135,8 +133,6 @@ infrastructure:
     | SHA1                         | [11-8]  |    y    |
     |--------------------------------------------------|
     | AES                          | [7-4]   |    y    |
-    |--------------------------------------------------|
-    | RES0                         | [3-0]   |    n    |
     x--------------------------------------------------x
@@ -144,12 +140,10 @@ infrastructure:
     x--------------------------------------------------x
     | Name                         |  bits   | visible |
     |--------------------------------------------------|
-    | RES0                         | [63-36] |    n    |
     | DIT                          | [51-48] |    y    |
     |--------------------------------------------------|
     | SVE                          | [35-32] |    y    |
     |--------------------------------------------------|
-    | RES0                         | [31-28] |    n    |
-    |--------------------------------------------------|
     | GIC                          | [27-24] |    n    |
     |--------------------------------------------------|
     | AdvSIMD                      | [23-20] |    y    |
@@ -199,6 +193,14 @@ infrastructure:
     | DPB                          | [3-0]   |    y    |
     x--------------------------------------------------x

  5) ID_AA64MMFR2_EL1 - Memory model feature register 2

     x--------------------------------------------------x
     | Name                         |  bits   | visible |
     |--------------------------------------------------|
     | AT                           | [35-32] |    y    |
     x--------------------------------------------------x

  Appendix I: Example
  ---------------------------
......
@@ -162,3 +162,19 @@ HWCAP_SVE
HWCAP_ASIMDFHM
Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001.
HWCAP_DIT
Functionality implied by ID_AA64PFR0_EL1.DIT == 0b0001.
HWCAP_USCAT
Functionality implied by ID_AA64MMFR2_EL1.AT == 0b0001.
HWCAP_ILRCPC
Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0010.
HWCAP_FLAGM
Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
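As a quick illustration of how userspace consumes these bits — a minimal sketch, not part of this series; it only assumes the glibc getauxval() interface, and repeats the HWCAP_* values from the uapi header changed elsewhere in this series so it stays self-contained:

#include <stdio.h>
#include <sys/auxv.h>        /* getauxval(), AT_HWCAP */

/* Same bit positions as arch/arm64/include/uapi/asm/hwcap.h in this series. */
#define HWCAP_DIT    (1 << 24)
#define HWCAP_USCAT  (1 << 25)
#define HWCAP_ILRCPC (1 << 26)
#define HWCAP_FLAGM  (1 << 27)

int main(void)
{
        unsigned long hwcaps = getauxval(AT_HWCAP);

        printf("DIT    : %s\n", (hwcaps & HWCAP_DIT)    ? "yes" : "no");
        printf("USCAT  : %s\n", (hwcaps & HWCAP_USCAT)  ? "yes" : "no");
        printf("ILRCPC : %s\n", (hwcaps & HWCAP_ILRCPC) ? "yes" : "no");
        printf("FLAGM  : %s\n", (hwcaps & HWCAP_FLAGM)  ? "yes" : "no");
        return 0;
}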
@@ -55,6 +55,7 @@ stable kernels.
| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
| ARM | Cortex-A72 | #853709 | N/A |
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
......
@@ -459,12 +459,26 @@ config ARM64_ERRATUM_845719

config ARM64_ERRATUM_843419
        bool "Cortex-A53: 843419: A load or store might access an incorrect address"
        default y
-       select ARM64_MODULE_CMODEL_LARGE if MODULES
        select ARM64_MODULE_PLTS if MODULES
        help
          This option links the kernel with '--fix-cortex-a53-843419' and
-         builds modules using the large memory model in order to avoid the use
-         of the ADRP instruction, which can cause a subsequent memory access
-         to use an incorrect address on Cortex-A53 parts up to r0p4.
          enables PLT support to replace certain ADRP instructions, which can
          cause subsequent memory accesses to use an incorrect address on
          Cortex-A53 parts up to r0p4.

          If unsure, say Y.
config ARM64_ERRATUM_1024718
bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
default y
help
This option adds a workaround for Arm Cortex-A55 Erratum 1024718.
Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could perform an incorrect
update of the hardware dirty bit when the DBM/AP bits are updated
without a break-before-make. The workaround is to disable hardware DBM
locally on the affected cores; CPUs not affected by the erratum will
continue to use the feature.
If unsure, say Y.
@@ -1108,12 +1122,25 @@ config ARM64_SVE

          To enable use of this extension on CPUs that implement it, say Y.

          Note that for architectural reasons, firmware _must_ implement SVE
          support when running on SVE capable hardware. The required support
          is present in:

            * version 1.5 and later of the ARM Trusted Firmware
            * the AArch64 boot wrapper since commit 5e1261e08abf
              ("bootwrapper: SVE: Enable SVE for EL2 and below").

          For other firmware implementations, consult the firmware documentation
          or vendor.

          If you need the kernel to boot on SVE-capable hardware with broken
          firmware, you may need to say N here until you get your firmware
          fixed. Otherwise, you may experience firmware panics or lockups when
          booting the kernel. If unsure and you are not observing these
          symptoms, you should assume that it is safe to say Y.

-config ARM64_MODULE_CMODEL_LARGE
-       bool

config ARM64_MODULE_PLTS
        bool
-       select ARM64_MODULE_CMODEL_LARGE
        select HAVE_MOD_ARCH_SPECIFIC

config RELOCATABLE
@@ -1147,12 +1174,12 @@ config RANDOMIZE_BASE

          If unsure, say N.

config RANDOMIZE_MODULE_REGION_FULL
-       bool "Randomize the module region independently from the core kernel"
        bool "Randomize the module region over a 4 GB range"
        depends on RANDOMIZE_BASE
        default y
        help
-         Randomizes the location of the module region without considering the
-         location of the core kernel. This way, it is impossible for modules
          Randomizes the location of the module region inside a 4 GB window
          covering the core kernel. This way, it is less likely for modules
          to leak information about the location of core kernel data structures
          but it does imply that function calls between modules and the core
          kernel will need to be resolved via veneers in the module PLT.
......
@@ -51,7 +51,6 @@ endif

KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)

KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)

@@ -77,10 +76,6 @@ endif

CHECKFLAGS += -D__aarch64__ -m64

-ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y)
-KBUILD_CFLAGS_MODULE += -mcmodel=large
-endif
-
ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
endif

@@ -97,12 +92,14 @@ else
TEXT_OFFSET := 0x00080000
endif

-# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#                       - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
# in 32-bit arithmetic
KASAN_SHADOW_SCALE_SHIFT := 3
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
        (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
-       + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \
-       - (1 << (64 - 32 - 3)) )) )
        + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
        - (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) )
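The shell arithmetic above only manipulates the top 32 bits of the offset. A small standalone C sketch (illustrative only; the (48, 3) input is an example, not a value taken from any particular defconfig) mirrors the same computation:

#include <stdio.h>

/* Mirror of the Makefile arithmetic above: compute the top 32 bits of
 * KASAN_SHADOW_OFFSET for a given VA_BITS and KASAN_SHADOW_SCALE_SHIFT,
 * then shift them back up into a full 64-bit constant. */
static unsigned long long kasan_shadow_offset(int va_bits, int scale_shift)
{
        unsigned int top = 0xffffffffu & (-1u << (va_bits - 32)); /* VA_START >> 32 */

        top += 1u << (va_bits - 32 - scale_shift);
        top -= 1u << (64 - 32 - scale_shift);

        return (unsigned long long)top << 32;
}

int main(void)
{
        printf("0x%016llx\n", kasan_shadow_offset(48, 3));
        return 0;
}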
export TEXT_OFFSET GZFLAGS export TEXT_OFFSET GZFLAGS
......
...@@ -202,25 +202,15 @@ lr .req x30 // link register ...@@ -202,25 +202,15 @@ lr .req x30 // link register
/* /*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
* <symbol> is within the range +/- 4 GB of the PC when running * <symbol> is within the range +/- 4 GB of the PC.
* in core kernel context. In module context, a movz/movk sequence
* is used, since modules may be loaded far away from the kernel
* when KASLR is in effect.
*/ */
/* /*
* @dst: destination register (64 bit wide) * @dst: destination register (64 bit wide)
* @sym: name of the symbol * @sym: name of the symbol
*/ */
.macro adr_l, dst, sym .macro adr_l, dst, sym
#ifndef MODULE
adrp \dst, \sym adrp \dst, \sym
add \dst, \dst, :lo12:\sym add \dst, \dst, :lo12:\sym
#else
movz \dst, #:abs_g3:\sym
movk \dst, #:abs_g2_nc:\sym
movk \dst, #:abs_g1_nc:\sym
movk \dst, #:abs_g0_nc:\sym
#endif
.endm .endm
/* /*
...@@ -231,7 +221,6 @@ lr .req x30 // link register ...@@ -231,7 +221,6 @@ lr .req x30 // link register
* the address * the address
*/ */
.macro ldr_l, dst, sym, tmp= .macro ldr_l, dst, sym, tmp=
#ifndef MODULE
.ifb \tmp .ifb \tmp
adrp \dst, \sym adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym] ldr \dst, [\dst, :lo12:\sym]
...@@ -239,15 +228,6 @@ lr .req x30 // link register ...@@ -239,15 +228,6 @@ lr .req x30 // link register
adrp \tmp, \sym adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym] ldr \dst, [\tmp, :lo12:\sym]
.endif .endif
#else
.ifb \tmp
adr_l \dst, \sym
ldr \dst, [\dst]
.else
adr_l \tmp, \sym
ldr \dst, [\tmp]
.endif
#endif
.endm .endm
/* /*
...@@ -257,28 +237,18 @@ lr .req x30 // link register ...@@ -257,28 +237,18 @@ lr .req x30 // link register
* while <src> needs to be preserved. * while <src> needs to be preserved.
*/ */
.macro str_l, src, sym, tmp .macro str_l, src, sym, tmp
#ifndef MODULE
adrp \tmp, \sym adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym] str \src, [\tmp, :lo12:\sym]
#else
adr_l \tmp, \sym
str \src, [\tmp]
#endif
.endm .endm
/* /*
* @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
* non-module code
* @sym: The name of the per-cpu variable * @sym: The name of the per-cpu variable
* @tmp: scratch register * @tmp: scratch register
*/ */
.macro adr_this_cpu, dst, sym, tmp .macro adr_this_cpu, dst, sym, tmp
#ifndef MODULE
adrp \tmp, \sym adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym add \dst, \tmp, #:lo12:\sym
#else
adr_l \dst, \sym
#endif
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1 mrs \tmp, tpidr_el1
alternative_else alternative_else
......
...@@ -20,8 +20,12 @@ ...@@ -20,8 +20,12 @@
#define CTR_L1IP_SHIFT 14 #define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3 #define CTR_L1IP_MASK 3
#define CTR_DMINLINE_SHIFT 16
#define CTR_ERG_SHIFT 20
#define CTR_CWG_SHIFT 24 #define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15 #define CTR_CWG_MASK 15
#define CTR_IDC_SHIFT 28
#define CTR_DIC_SHIFT 29
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
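For reference, the new CTR_EL0 bits could be tested along these lines — an illustrative sketch only, not the detection code from this series; the helper names are made up and the raw CTR_EL0 value is assumed to come from a helper such as read_cpuid_cachetype():

/* Illustrative sketch, not part of this series. */
static inline bool ctr_idc(unsigned long ctr)
{
        /* IDC: D-cache clean to PoU not required for I/D coherence */
        return ctr & (1UL << CTR_IDC_SHIFT);
}

static inline bool ctr_dic(unsigned long ctr)
{
        /* DIC: I-cache invalidation to PoU not required for I/D coherence */
        return ctr & (1UL << CTR_DIC_SHIFT);
}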
......
...@@ -133,6 +133,9 @@ extern void flush_dcache_page(struct page *); ...@@ -133,6 +133,9 @@ extern void flush_dcache_page(struct page *);
static inline void __flush_icache_all(void) static inline void __flush_icache_all(void)
{ {
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
return;
asm("ic ialluis"); asm("ic ialluis");
dsb(ish); dsb(ish);
} }
......
...@@ -18,7 +18,8 @@ ...@@ -18,7 +18,8 @@
#ifndef __ASM_CMPXCHG_H #ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H #define __ASM_CMPXCHG_H
#include <linux/bug.h> #include <linux/build_bug.h>
#include <linux/compiler.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/barrier.h> #include <asm/barrier.h>
...@@ -196,32 +197,6 @@ __CMPXCHG_GEN(_mb) ...@@ -196,32 +197,6 @@ __CMPXCHG_GEN(_mb)
__ret; \ __ret; \
}) })
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
typeof(*raw_cpu_ptr(&(pcp))) __ret; \
preempt_disable(); \
__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
preempt_enable(); \
__ret; \
})
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
preempt_disable(); \
__ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
raw_cpu_ptr(&(ptr2)), \
o1, o2, n1, n2); \
preempt_enable(); \
__ret; \
})
#define __CMPWAIT_CASE(w, sz, name) \ #define __CMPWAIT_CASE(w, sz, name) \
static inline void __cmpwait_case_##name(volatile void *ptr, \ static inline void __cmpwait_case_##name(volatile void *ptr, \
unsigned long val) \ unsigned long val) \
......
...@@ -45,7 +45,11 @@ ...@@ -45,7 +45,11 @@
#define ARM64_HARDEN_BRANCH_PREDICTOR 24 #define ARM64_HARDEN_BRANCH_PREDICTOR 24
#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 #define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
#define ARM64_HAS_RAS_EXTN 26 #define ARM64_HAS_RAS_EXTN 26
#define ARM64_WORKAROUND_843419 27
#define ARM64_HAS_CACHE_IDC 28
#define ARM64_HAS_CACHE_DIC 29
#define ARM64_HW_DBM 30
-#define ARM64_NCAPS 27
#define ARM64_NCAPS 31
#endif /* __ASM_CPUCAPS_H */ #endif /* __ASM_CPUCAPS_H */
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#define __ASM_CPUFEATURE_H #define __ASM_CPUFEATURE_H
#include <asm/cpucaps.h> #include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/hwcap.h> #include <asm/hwcap.h>
#include <asm/sigcontext.h> #include <asm/sigcontext.h>
@@ -89,24 +90,231 @@ struct arm64_ftr_reg {
extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;

-/* scope of capability check */
-enum {
-       SCOPE_SYSTEM,
-       SCOPE_LOCAL_CPU,
-};
/*
* CPU capabilities:
*
* We use arm64_cpu_capabilities to represent system features, errata work
* arounds (both used internally by kernel and tracked in cpu_hwcaps) and
* ELF HWCAPs (which are exposed to user).
*
* To support systems with heterogeneous CPUs, we need to make sure that we
* detect the capabilities correctly on the system and take appropriate
* measures to ensure there are no incompatibilities.
*
* This comment tries to explain how we treat the capabilities.
* Each capability has the following list of attributes :
*
* 1) Scope of Detection : The system detects a given capability by
* performing some checks at runtime. This could be, e.g, checking the
* value of a field in CPU ID feature register or checking the cpu
* model. The capability provides a call back ( @matches() ) to
* perform the check. Scope defines how the checks should be performed.
* There are three cases:
*
* a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
* matches. This implies we have to run the check on all the
* booting CPUs, until the system decides that the state of the
* capability is finalised. (See section 2 below.)
* Or
* b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
* match. This implies we run the check only once, when the
* system decides to finalise the state of the capability. If the
* capability relies on a field in one of the CPU ID feature
* registers, we use the sanitised value of the register from the
* CPU feature infrastructure to make the decision.
* Or
* c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
* feature. This category is for features that are "finalised"
* (or used) by the kernel very early even before the SMP cpus
* are brought up.
*
* The process of detection is usually denoted by "update" capability
* state in the code.
*
* 2) Finalise the state: The kernel should finalise the state of a
* capability at some point during its execution and take necessary
* actions if any. Usually, this is done after all the boot-time
* enabled CPUs are brought up by the kernel, so that it can make
* a better decision based on the available set of CPUs. However, there
* are some special cases where the action is taken during the early
* boot by the primary boot CPU. (e.g, running the kernel at EL2 with
* Virtualisation Host Extensions). The kernel usually disallows any
* changes to the state of a capability once it finalises the capability
* and takes any action, as it may be impossible to execute the actions
* safely. A CPU brought up after a capability is "finalised" is
* referred to as "Late CPU" w.r.t the capability. e.g, all secondary
* CPUs are treated "late CPUs" for capabilities determined by the boot
* CPU.
*
* At the moment there are two passes of finalising the capabilities.
* a) Boot CPU scope capabilities - Finalised by primary boot CPU via
* setup_boot_cpu_capabilities().
* b) Everything except (a) - Run via setup_system_capabilities().
*
* 3) Verification: When a CPU is brought online (e.g, by user or by the
* kernel), the kernel should make sure that it is safe to use the CPU,
* by verifying that the CPU is compliant with the state of the
* capabilities finalised already. This happens via :
*
* secondary_start_kernel()-> check_local_cpu_capabilities()
*
* As explained in (2) above, capabilities could be finalised at
* different points in the execution. Each newly booted CPU is verified
* against the capabilities that have been finalised by the time it
* boots.
*
* a) SCOPE_BOOT_CPU : All CPUs are verified against the capability
* except for the primary boot CPU.
*
* b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
* user after the kernel boot are verified against the capability.
*
* If there is a conflict, the kernel takes an action, based on the
* severity (e.g, a CPU could be prevented from booting or cause a
* kernel panic). The CPU is allowed to "affect" the state of the
* capability, if it has not been finalised already. See section 5
* for more details on conflicts.
*
* 4) Action: As mentioned in (2), the kernel can take an action for each
* detected capability, on all CPUs on the system. Appropriate actions
* include, turning on an architectural feature, modifying the control
* registers (e.g, SCTLR, TCR etc.) or patching the kernel via
* alternatives. The kernel patching is batched and performed at a later
* point. The actions are always initiated only after the capability
* is finalised. This is usually denoted by "enabling" the capability.
* The actions are initiated as follows:
* a) Action is triggered on all online CPUs, after the capability is
* finalised, invoked within the stop_machine() context from
* enable_cpu_capabilities().
*
* b) For any late CPU brought up after (1), the action is triggered via:
*
* check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
*
* 5) Conflicts: Based on the state of the capability on a late CPU vs.
* the system state, we could have the following combinations :
*
* x-----------------------------x
* | Type | System | Late CPU |
* |-----------------------------|
* | a | y | n |
* |-----------------------------|
* | b | n | y |
* x-----------------------------x
*
* Two separate flag bits are defined to indicate whether each kind of
* conflict can be allowed:
* ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case(a) is allowed
* ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case(b) is allowed
*
* Case (a) is not permitted for a capability that the system requires
* all CPUs to have in order for the capability to be enabled. This is
* typical for capabilities that represent enhanced functionality.
*
* Case (b) is not permitted for a capability that must be enabled
* during boot if any CPU in the system requires it in order to run
* safely. This is typical for erratum work arounds that cannot be
* enabled after the corresponding capability is finalised.
*
* In some non-typical cases either both (a) and (b), or neither,
* should be permitted. This can be described by including neither
* or both flags in the capability's type field.
*/
/*
* Decide how the capability is detected.
* On any local CPU vs System wide vs the primary boot CPU
*/
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1))
/*
* The capability is detected on the boot CPU and is used by the kernel
* during early boot, i.e. the capability should be "detected" and
* "enabled" as early as possible on all booting CPUs.
*/
#define ARM64_CPUCAP_SCOPE_BOOT_CPU ((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK \
(ARM64_CPUCAP_SCOPE_SYSTEM | \
ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
ARM64_CPUCAP_SCOPE_BOOT_CPU)
#define SCOPE_SYSTEM ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL ARM64_CPUCAP_SCOPE_MASK
/*
* Is it permitted for a late CPU to have this capability when the system
* hasn't already enabled it?
*/
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when the system has it? */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
/*
* CPU errata workarounds that need to be enabled at boot time if one or
* more CPUs in the system requires it. When one of these capabilities
* has been enabled, it is safe to allow any CPU to boot that doesn't
* require the workaround. However, it is not safe if a "late" CPU
* requires a workaround and the system hasn't enabled it already.
*/
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM \
(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
* CPU feature detected at boot time based on the system-wide value of a
* feature. It is safe for a late CPU to have this feature even though
* the system hasn't enabled it, although the feature will not be used
* by Linux in this case. If the system has enabled this feature already,
* then every late CPU must have it.
*/
#define ARM64_CPUCAP_SYSTEM_FEATURE \
(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
* CPU feature detected at boot time based on feature of one or more CPUs.
* All possible conflicts for a late CPU are ignored.
*/
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \
(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \
ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
* CPU feature detected at boot time, on one or more CPUs. A late CPU
* is not allowed to have the capability when the system doesn't have it.
* It is OK for a late CPU to miss the feature.
*/
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE \
(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
* CPU feature used early in the boot based on the boot CPU. All secondary
* CPUs must match the state of the capability as detected by the boot CPU.
*/
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
struct arm64_cpu_capabilities {
        const char *desc;
        u16 capability;
-       int def_scope;                  /* default scope */
        u16 type;
        bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
-       int (*enable)(void *);          /* Called on all active CPUs */
        /*
         * Take the appropriate actions to enable this capability for this CPU.
         * For each successfully booted CPU, this method is called for each
         * globally detected capability.
         */
        void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
        union {
                struct {        /* To be used for erratum handling only */
-                       u32 midr_model;
-                       u32 midr_range_min, midr_range_max;
                        struct midr_range midr_range;
                        const struct arm64_midr_revidr {
                                u32 midr_rv;            /* revision/variant */
                                u32 revidr_mask;
                        } * const fixed_revs;
                };

                const struct midr_range *midr_range_list;
                struct {        /* Feature register checking */
                        u32 sys_reg;
                        u8 field_pos;
@@ -115,9 +323,38 @@ struct arm64_cpu_capabilities {
                        bool sign;
                        unsigned long hwcap;
                };
                /*
                 * A list of "matches/cpu_enable" pairs for the same
                 * "capability" of the same "type" as described by the parent.
                 * Only matches(), cpu_enable() and fields relevant to these
                 * methods are significant in the list. The cpu_enable is
                 * invoked only if the corresponding entry "matches()".
                 * However, if a cpu_enable() method is associated
                 * with multiple matches(), care should be taken that either
                 * the match criteria are mutually exclusive, or that the
                 * method is robust against being called multiple times.
                 */
                const struct arm64_cpu_capabilities *match_list;
        };
};
static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}
static inline bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
{
return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
}
static inline bool
cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
{
return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
}
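Putting the pieces together, a capability entry built from these fields might look roughly like the sketch below. It is illustrative only and not an entry from this series: the capability name, the matches/cpu_enable bodies and the use of ARM64_NCAPS - 1 as a placeholder bit are all made up.

/* Illustrative sketch: a system-wide feature permitted to be absent on
 * late CPUs, described via the new type/matches/cpu_enable fields. */
static bool has_example_feature(const struct arm64_cpu_capabilities *cap,
                                int scope)
{
        /* e.g. read_sanitised_ftr_reg(cap->sys_reg) and compare the field */
        return false;   /* placeholder */
}

static void example_feature_enable(const struct arm64_cpu_capabilities *cap)
{
        /* e.g. set a control bit in SCTLR_EL1 on this CPU */
}

static const struct arm64_cpu_capabilities example_cap = {
        .desc           = "Example feature",
        .capability     = ARM64_NCAPS - 1,      /* placeholder capability bit */
        .type           = ARM64_CPUCAP_SYSTEM_FEATURE,
        .matches        = has_example_feature,
        .cpu_enable     = example_feature_enable,
};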
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready; extern struct static_key_false arm64_const_caps_ready;
...@@ -236,15 +473,8 @@ static inline bool id_aa64pfr0_sve(u64 pfr0) ...@@ -236,15 +473,8 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
} }
void __init setup_cpu_features(void); void __init setup_cpu_features(void);
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info);
void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_capabilities(void); void check_local_cpu_capabilities(void);
void update_cpu_errata_workarounds(void);
void __init enable_errata_workarounds(void);
void verify_local_cpu_errata_workarounds(void);
u64 read_sanitised_ftr_reg(u32 id); u64 read_sanitised_ftr_reg(u32 id);
......
...@@ -83,6 +83,8 @@ ...@@ -83,6 +83,8 @@
#define ARM_CPU_PART_CORTEX_A53 0xD03 #define ARM_CPU_PART_CORTEX_A53 0xD03
#define ARM_CPU_PART_CORTEX_A73 0xD09 #define ARM_CPU_PART_CORTEX_A73 0xD09
#define ARM_CPU_PART_CORTEX_A75 0xD0A #define ARM_CPU_PART_CORTEX_A75 0xD0A
#define ARM_CPU_PART_CORTEX_A35 0xD04
#define ARM_CPU_PART_CORTEX_A55 0xD05
#define APM_CPU_PART_POTENZA 0x000 #define APM_CPU_PART_POTENZA 0x000
...@@ -102,6 +104,8 @@ ...@@ -102,6 +104,8 @@
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
...@@ -117,6 +121,45 @@ ...@@ -117,6 +121,45 @@
#define read_cpuid(reg) read_sysreg_s(SYS_ ## reg) #define read_cpuid(reg) read_sysreg_s(SYS_ ## reg)
/*
* Represent a range of MIDR values for a given CPU model and a
* range of variant/revision values.
*
* @model - CPU model as defined by MIDR_CPU_MODEL
* @rv_min - Minimum value for the revision/variant as defined by
* MIDR_CPU_VAR_REV
* @rv_max - Maximum value for the variant/revision for the range.
*/
struct midr_range {
u32 model;
u32 rv_min;
u32 rv_max;
};
#define MIDR_RANGE(m, v_min, r_min, v_max, r_max) \
{ \
.model = m, \
.rv_min = MIDR_CPU_VAR_REV(v_min, r_min), \
.rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \
}
#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
{
return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
range->rv_min, range->rv_max);
}
static inline bool
is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
{
while (ranges->model)
if (is_midr_in_range(midr, ranges++))
return true;
return false;
}
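A short sketch of how these helpers are meant to be used (the list contents below are made up; a zero model terminates the list, which is what is_midr_in_range_list() relies on):

/* Illustrative only: hypothetical list of affected CPUs. */
static const struct midr_range example_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4),        /* A53 r0p0 - r0p4 */
        {},
};

static bool this_cpu_in_example_list(void)
{
        return is_midr_in_range_list(read_cpuid_id(), example_cpus);
}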
/* /*
* The CPU ID never changes at run time, so we might as well tell the * The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID * compiler that it's constant. Use this function to read the CPU ID
......
...@@ -240,6 +240,15 @@ ...@@ -240,6 +240,15 @@
(((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \ (((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
ESR_ELx_SYS64_ISS_OP2_SHIFT)) ESR_ELx_SYS64_ISS_OP2_SHIFT))
/*
* ISS field definitions for floating-point exception traps
* (FP_EXC_32/FP_EXC_64).
*
* (The FPEXC_* constants are used instead for common bits.)
*/
#define ESR_ELx_FP_EXC_TFV (UL(1) << 23)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/types.h> #include <asm/types.h>
......
...@@ -22,33 +22,9 @@ ...@@ -22,33 +22,9 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h> #include <linux/stddef.h>
/*
* FP/SIMD storage area has:
* - FPSR and FPCR
* - 32 128-bit data registers
*
* Note that user_fpsimd forms a prefix of this structure, which is
* relied upon in the ptrace FP/SIMD accessors.
*/
struct fpsimd_state {
union {
struct user_fpsimd_state user_fpsimd;
struct {
__uint128_t vregs[32];
u32 fpsr;
u32 fpcr;
/*
* For ptrace compatibility, pad to next 128-bit
* boundary here if extending this struct.
*/
};
};
/* the id of the last cpu to have restored this state */
unsigned int cpu;
};
#if defined(__KERNEL__) && defined(CONFIG_COMPAT) #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/* Masks for extracting the FPSR and FPCR from the FPSCR */ /* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f #define VFP_FPSCR_STAT_MASK 0xf800009f
...@@ -62,8 +38,8 @@ struct fpsimd_state { ...@@ -62,8 +38,8 @@ struct fpsimd_state {
struct task_struct; struct task_struct;
-extern void fpsimd_save_state(struct fpsimd_state *state);
-extern void fpsimd_load_state(struct fpsimd_state *state);
extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);
extern void fpsimd_thread_switch(struct task_struct *next); extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void); extern void fpsimd_flush_thread(void);
...@@ -83,7 +59,9 @@ extern void sve_save_state(void *state, u32 *pfpsr); ...@@ -83,7 +59,9 @@ extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr, extern void sve_load_state(void const *state, u32 const *pfpsr,
unsigned long vq_minus_1); unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void); extern unsigned int sve_get_vl(void);
extern int sve_kernel_enable(void *);
struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern int __ro_after_init sve_max_vl; extern int __ro_after_init sve_max_vl;
......
...@@ -4,8 +4,11 @@ ...@@ -4,8 +4,11 @@
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
#include <linux/compiler_types.h>
#include <linux/export.h>
#include <linux/stringify.h> #include <linux/stringify.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/cpucaps.h>
#ifdef __ASSEMBLER__ #ifdef __ASSEMBLER__
......
...@@ -39,6 +39,8 @@ struct mod_arch_specific { ...@@ -39,6 +39,8 @@ struct mod_arch_specific {
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym); Elf64_Sym *sym);
u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val);
#ifdef CONFIG_RANDOMIZE_BASE #ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base; extern u64 module_alloc_base;
#else #else
......
...@@ -16,7 +16,10 @@ ...@@ -16,7 +16,10 @@
#ifndef __ASM_PERCPU_H #ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H #define __ASM_PERCPU_H
#include <linux/preempt.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h> #include <asm/stack_pointer.h>
static inline void set_my_cpu_offset(unsigned long off) static inline void set_my_cpu_offset(unsigned long off)
...@@ -197,6 +200,32 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, ...@@ -197,6 +200,32 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return ret; return ret;
} }
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
typeof(*raw_cpu_ptr(&(pcp))) __ret; \
preempt_disable(); \
__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
preempt_enable(); \
__ret; \
})
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
preempt_disable(); \
__ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
raw_cpu_ptr(&(ptr2)), \
o1, o2, n1, n2); \
preempt_enable(); \
__ret; \
})
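For context, a minimal sketch of the kind of caller these wrappers serve (the per-cpu variable and the claim protocol are hypothetical, shown only to illustrate what _protect_cmpxchg_local() wraps):

/* Illustrative only. */
static DEFINE_PER_CPU(unsigned long, example_slot);

static bool example_try_claim_this_cpu(void)
{
        /* 0 -> 1 transition for this CPU's slot, safe against preemption */
        return this_cpu_cmpxchg(example_slot, 0, 1) == 0;
}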
#define _percpu_read(pcp) \ #define _percpu_read(pcp) \
({ \ ({ \
typeof(pcp) __retval; \ typeof(pcp) __retval; \
......
...@@ -291,6 +291,7 @@ ...@@ -291,6 +291,7 @@
#define TCR_TBI0 (UL(1) << 37) #define TCR_TBI0 (UL(1) << 37)
#define TCR_HA (UL(1) << 39) #define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40) #define TCR_HD (UL(1) << 40)
#define TCR_NFD1 (UL(1) << 54)
/* /*
* TTBR. * TTBR.
......
...@@ -34,10 +34,12 @@ ...@@ -34,10 +34,12 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/alternative.h> #include <asm/alternative.h>
-#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h> #include <asm/hw_breakpoint.h>
#include <asm/lse.h> #include <asm/lse.h>
#include <asm/pgtable-hwdef.h> #include <asm/pgtable-hwdef.h>
@@ -103,11 +105,19 @@ struct cpu_context {

struct thread_struct {
        struct cpu_context      cpu_context;    /* cpu context */
-       unsigned long           tp_value;       /* TLS register */
-#ifdef CONFIG_COMPAT
-       unsigned long           tp2_value;
-#endif
-       struct fpsimd_state     fpsimd_state;

        /*
         * Whitelisted fields for hardened usercopy:
         * Maintainers must ensure manually that this contains no
         * implicit padding.
         */
        struct {
                unsigned long   tp_value;       /* TLS register */
                unsigned long   tp2_value;
                struct user_fpsimd_state fpsimd_state;
        } uw;

        unsigned int            fpsimd_cpu;
        void *sve_state;                        /* SVE registers, if any */
        unsigned int sve_vl;                    /* SVE vector length */
        unsigned int sve_vl_onexec;             /* SVE vl after next exec */
@@ -116,14 +126,17 @@ struct thread_struct {
        struct debug_info       debug;          /* debugging */
};
-/*
- * Everything usercopied to/from thread_struct is statically-sized, so
- * no hardened usercopy whitelist is needed.
- */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
-       *offset = *size = 0;
        /* Verify that there is no padding among the whitelisted fields: */
        BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
                     sizeof_field(struct thread_struct, uw.tp_value) +
                     sizeof_field(struct thread_struct, uw.tp2_value) +
                     sizeof_field(struct thread_struct, uw.fpsimd_state));
        *offset = offsetof(struct thread_struct, uw);
        *size = sizeof_field(struct thread_struct, uw);
}
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
...@@ -131,13 +144,13 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, ...@@ -131,13 +144,13 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
({ \ ({ \
unsigned long *__tls; \ unsigned long *__tls; \
if (is_compat_thread(task_thread_info(t))) \ if (is_compat_thread(task_thread_info(t))) \
__tls = &(t)->thread.tp2_value; \ __tls = &(t)->thread.uw.tp2_value; \
else \ else \
__tls = &(t)->thread.tp_value; \ __tls = &(t)->thread.uw.tp_value; \
__tls; \ __tls; \
}) })
#else #else
#define task_user_tls(t) (&(t)->thread.tp_value) #define task_user_tls(t) (&(t)->thread.uw.tp_value)
#endif #endif
/* Sync TPIDR_EL0 back to thread_struct for current */ /* Sync TPIDR_EL0 back to thread_struct for current */
...@@ -227,9 +240,9 @@ static inline void spin_lock_prefetch(const void *ptr) ...@@ -227,9 +240,9 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif #endif
-int cpu_enable_pan(void *__unused);
-int cpu_enable_cache_maint_trap(void *__unused);
-int cpu_clear_disr(void *__unused);
void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */ /* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg) #define SVE_SET_VL(arg) sve_set_current_vl(arg)
......
...@@ -490,6 +490,7 @@ ...@@ -490,6 +490,7 @@
#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0) #define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
/* id_aa64isar0 */ /* id_aa64isar0 */
#define ID_AA64ISAR0_TS_SHIFT 52
#define ID_AA64ISAR0_FHM_SHIFT 48 #define ID_AA64ISAR0_FHM_SHIFT 48
#define ID_AA64ISAR0_DP_SHIFT 44 #define ID_AA64ISAR0_DP_SHIFT 44
#define ID_AA64ISAR0_SM4_SHIFT 40 #define ID_AA64ISAR0_SM4_SHIFT 40
...@@ -511,6 +512,7 @@ ...@@ -511,6 +512,7 @@
/* id_aa64pfr0 */ /* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV3_SHIFT 60
#define ID_AA64PFR0_CSV2_SHIFT 56 #define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_DIT_SHIFT 48
#define ID_AA64PFR0_SVE_SHIFT 32 #define ID_AA64PFR0_SVE_SHIFT 32
#define ID_AA64PFR0_RAS_SHIFT 28 #define ID_AA64PFR0_RAS_SHIFT 28
#define ID_AA64PFR0_GIC_SHIFT 24 #define ID_AA64PFR0_GIC_SHIFT 24
...@@ -568,6 +570,7 @@ ...@@ -568,6 +570,7 @@
#define ID_AA64MMFR1_VMIDBITS_16 2 #define ID_AA64MMFR1_VMIDBITS_16 2
/* id_aa64mmfr2 */ /* id_aa64mmfr2 */
#define ID_AA64MMFR2_AT_SHIFT 32
#define ID_AA64MMFR2_LVA_SHIFT 16 #define ID_AA64MMFR2_LVA_SHIFT 16
#define ID_AA64MMFR2_IESB_SHIFT 12 #define ID_AA64MMFR2_IESB_SHIFT 12
#define ID_AA64MMFR2_LSM_SHIFT 8 #define ID_AA64MMFR2_LSM_SHIFT 8
......
...@@ -45,17 +45,6 @@ extern void __show_regs(struct pt_regs *); ...@@ -45,17 +45,6 @@ extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
#define show_unhandled_signals_ratelimited() \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
bool __show_ratelimited = false; \
if (show_unhandled_signals && __ratelimit(&_rs)) \
__show_ratelimited = true; \
__show_ratelimited; \
})
int handle_guest_sea(phys_addr_t addr, unsigned int esr); int handle_guest_sea(phys_addr_t addr, unsigned int esr);
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -60,6 +60,15 @@ ...@@ -60,6 +60,15 @@
__tlbi(op, (arg) | USER_ASID_FLAG); \ __tlbi(op, (arg) | USER_ASID_FLAG); \
} while (0) } while (0)
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid) \
({ \
unsigned long __ta = (addr) >> 12; \
__ta &= GENMASK_ULL(43, 0); \
__ta |= (unsigned long)(asid) << 48; \
__ta; \
})
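As a worked example of the packing done by __TLBI_VADDR() (the address and ASID are made up, purely to show where the fields land):

static inline unsigned long tlbi_vaddr_example(void)
{
        /* Illustrative only: made-up user address and ASID. */
        return __TLBI_VADDR(0xffff000012345000UL, 0x42);
        /*
         * == 0x00420ff000012345: bits [43:0] hold (addr >> 12) with the
         * top VA bits masked off by GENMASK_ULL(43, 0); bits [63:48]
         * hold the ASID (0x42).
         */
}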
/* /*
* TLB Management * TLB Management
* ============== * ==============
...@@ -117,7 +126,7 @@ static inline void flush_tlb_all(void) ...@@ -117,7 +126,7 @@ static inline void flush_tlb_all(void)
static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_mm(struct mm_struct *mm)
{ {
unsigned long asid = ASID(mm) << 48; unsigned long asid = __TLBI_VADDR(0, ASID(mm));
dsb(ishst); dsb(ishst);
__tlbi(aside1is, asid); __tlbi(aside1is, asid);
...@@ -128,7 +137,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm) ...@@ -128,7 +137,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma, static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long uaddr) unsigned long uaddr)
{ {
unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48); unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
dsb(ishst); dsb(ishst);
__tlbi(vale1is, addr); __tlbi(vale1is, addr);
...@@ -146,7 +155,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, ...@@ -146,7 +155,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, unsigned long start, unsigned long end,
bool last_level) bool last_level)
{ {
unsigned long asid = ASID(vma->vm_mm) << 48; unsigned long asid = ASID(vma->vm_mm);
unsigned long addr; unsigned long addr;
if ((end - start) > MAX_TLB_RANGE) { if ((end - start) > MAX_TLB_RANGE) {
...@@ -154,8 +163,8 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, ...@@ -154,8 +163,8 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
return; return;
} }
start = asid | (start >> 12); start = __TLBI_VADDR(start, asid);
end = asid | (end >> 12); end = __TLBI_VADDR(end, asid);
dsb(ishst); dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
...@@ -185,8 +194,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end ...@@ -185,8 +194,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
return; return;
} }
start >>= 12; start = __TLBI_VADDR(start, 0);
end >>= 12; end = __TLBI_VADDR(end, 0);
dsb(ishst); dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
...@@ -202,7 +211,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end ...@@ -202,7 +211,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
static inline void __flush_tlb_pgtable(struct mm_struct *mm, static inline void __flush_tlb_pgtable(struct mm_struct *mm,
unsigned long uaddr) unsigned long uaddr)
{ {
unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));
__tlbi(vae1is, addr); __tlbi(vae1is, addr);
__tlbi_user(vae1is, addr); __tlbi_user(vae1is, addr);
......
...@@ -35,10 +35,10 @@ struct undef_hook { ...@@ -35,10 +35,10 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook); void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook); void unregister_undef_hook(struct undef_hook *hook);
-void force_signal_inject(int signal, int code, struct pt_regs *regs,
-                        unsigned long address);
-void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
void force_signal_inject(int signal, int code, unsigned long address);
void arm64_notify_segfault(unsigned long addr);
void arm64_force_sig_info(struct siginfo *info, const char *str,
                          struct task_struct *tsk);
/* /*
* Move regs->pc to next instruction and do necessary setup before it * Move regs->pc to next instruction and do necessary setup before it
......
...@@ -102,12 +102,6 @@ static inline bool has_vhe(void) ...@@ -102,12 +102,6 @@ static inline bool has_vhe(void)
return false; return false;
} }
#ifdef CONFIG_ARM64_VHE
extern void verify_cpu_run_el(void);
#else
static inline void verify_cpu_run_el(void) {}
#endif
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* ! __ASM__VIRT_H */ #endif /* ! __ASM__VIRT_H */
...@@ -44,5 +44,9 @@ ...@@ -44,5 +44,9 @@
#define HWCAP_SHA512 (1 << 21) #define HWCAP_SHA512 (1 << 21)
#define HWCAP_SVE (1 << 22) #define HWCAP_SVE (1 << 22)
#define HWCAP_ASIMDFHM (1 << 23) #define HWCAP_ASIMDFHM (1 << 23)
#define HWCAP_DIT (1 << 24)
#define HWCAP_USCAT (1 << 25)
#define HWCAP_ILRCPC (1 << 26)
#define HWCAP_FLAGM (1 << 27)
#endif /* _UAPI__ASM_HWCAP_H */ #endif /* _UAPI__ASM_HWCAP_H */
...@@ -21,25 +21,4 @@ ...@@ -21,25 +21,4 @@
#include <asm-generic/siginfo.h> #include <asm-generic/siginfo.h>
/*
* SIGFPE si_codes
*/
#ifdef __KERNEL__
#define FPE_FIXME 0 /* Broken dup of SI_USER */
#endif /* __KERNEL__ */
/*
* SIGBUS si_codes
*/
#ifdef __KERNEL__
#define BUS_FIXME 0 /* Broken dup of SI_USER */
#endif /* __KERNEL__ */
/*
* SIGTRAP si_codes
*/
#ifdef __KERNEL__
#define TRAP_FIXME 0 /* Broken dup of SI_USER */
#endif /* __KERNEL__ */
#endif #endif
...@@ -429,7 +429,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr) ...@@ -429,7 +429,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
fault: fault:
pr_debug("SWP{B} emulation: access caused memory abort!\n"); pr_debug("SWP{B} emulation: access caused memory abort!\n");
arm64_notify_segfault(regs, address); arm64_notify_segfault(address);
return 0; return 0;
} }
......
...@@ -23,11 +23,29 @@ ...@@ -23,11 +23,29 @@
static bool __maybe_unused static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
const struct arm64_midr_revidr *fix;
u32 midr = read_cpuid_id(), revidr;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
if (!is_midr_in_range(midr, &entry->midr_range))
return false;
midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
revidr = read_cpuid(REVIDR_EL1);
for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
return false;
return true;
}
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
int scope)
{ {
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-       return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
-                                      entry->midr_range_min,
-                                      entry->midr_range_max);
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
} }
static bool __maybe_unused static bool __maybe_unused
...@@ -41,7 +59,7 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) ...@@ -41,7 +59,7 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
MIDR_ARCHITECTURE_MASK; MIDR_ARCHITECTURE_MASK;
-       return model == entry->midr_model;
        return model == entry->midr_range.model;
} }
static bool static bool
...@@ -53,11 +71,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry, ...@@ -53,11 +71,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask); (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
} }
-static int cpu_enable_trap_ctr_access(void *__unused)
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
-       return 0;
}
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
...@@ -161,25 +179,25 @@ static void call_hvc_arch_workaround_1(void) ...@@ -161,25 +179,25 @@ static void call_hvc_arch_workaround_1(void)
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
} }
static int enable_smccc_arch_workaround_1(void *data) static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{ {
const struct arm64_cpu_capabilities *entry = data;
bp_hardening_cb_t cb; bp_hardening_cb_t cb;
void *smccc_start, *smccc_end; void *smccc_start, *smccc_end;
struct arm_smccc_res res; struct arm_smccc_res res;
if (!entry->matches(entry, SCOPE_LOCAL_CPU)) if (!entry->matches(entry, SCOPE_LOCAL_CPU))
return 0; return;
if (psci_ops.smccc_version == SMCCC_VERSION_1_0) if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
return 0; return;
switch (psci_ops.conduit) { switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC: case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res); ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 < 0) if ((int)res.a0 < 0)
return 0; return;
cb = call_hvc_arch_workaround_1; cb = call_hvc_arch_workaround_1;
smccc_start = __smccc_workaround_1_hvc_start; smccc_start = __smccc_workaround_1_hvc_start;
smccc_end = __smccc_workaround_1_hvc_end; smccc_end = __smccc_workaround_1_hvc_end;
...@@ -189,19 +207,19 @@ static int enable_smccc_arch_workaround_1(void *data) ...@@ -189,19 +207,19 @@ static int enable_smccc_arch_workaround_1(void *data)
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res); ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 < 0) if ((int)res.a0 < 0)
return 0; return;
cb = call_smc_arch_workaround_1; cb = call_smc_arch_workaround_1;
smccc_start = __smccc_workaround_1_smc_start; smccc_start = __smccc_workaround_1_smc_start;
smccc_end = __smccc_workaround_1_smc_end; smccc_end = __smccc_workaround_1_smc_end;
break; break;
default: default:
return 0; return;
} }
install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
return 0; return;
} }
static void qcom_link_stack_sanitization(void) static void qcom_link_stack_sanitization(void)
...@@ -216,31 +234,119 @@ static void qcom_link_stack_sanitization(void) ...@@ -216,31 +234,119 @@ static void qcom_link_stack_sanitization(void)
: "=&r" (tmp)); : "=&r" (tmp));
} }
static int qcom_enable_link_stack_sanitization(void *data) static void
qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
{ {
const struct arm64_cpu_capabilities *entry = data;
install_bp_hardening_cb(entry, qcom_link_stack_sanitization, install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
__qcom_hyp_sanitize_link_stack_start, __qcom_hyp_sanitize_link_stack_start,
__qcom_hyp_sanitize_link_stack_end); __qcom_hyp_sanitize_link_stack_end);
return 0;
} }
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
-#define MIDR_RANGE(model, min, max) \
-       .def_scope = SCOPE_LOCAL_CPU, \
-       .matches = is_affected_midr_range, \
-       .midr_model = model, \
-       .midr_range_min = min, \
-       .midr_range_max = max
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
        .matches = is_affected_midr_range, \
        .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model) \
        .matches = is_affected_midr_range, \
        .midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
        .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
#define CAP_MIDR_RANGE_LIST(list) \
.matches = is_affected_midr_range_list, \
.midr_range_list = list
/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max) \
ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev) \
ERRATA_MIDR_RANGE(model, var, rev, var, rev)
/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model) \
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_ALL_VERSIONS(model)
/* Errata affecting a list of MIDR ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list) \
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE_LIST(midr_list)
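Editor's note: as a reading aid, here is an expansion of one of the new helpers, derived from the macro definitions above; the Cortex-A53 entries later in arm64_errata use it exactly this way.

/*
 * Illustrative expansion (derived from the macros above):
 * ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2) in a capability entry
 * is equivalent to writing:
 */
.type		= ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches	= is_affected_midr_range,
.midr_range	= MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 2),	/* variant 0, r0p0 - r0p2 */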
/*
* Generic helper for handling capabilities with multiple (match, enable)
* pairs of callbacks, sharing the same capability bit.
* Iterate over each entry to see if at least one matches.
*/
static bool __maybe_unused
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
const struct arm64_cpu_capabilities *caps;
for (caps = entry->match_list; caps->matches; caps++)
if (caps->matches(caps, scope))
return true;
return false;
}
/*
* Take appropriate action for all matching entries in the shared capability
* entry.
*/
static void __maybe_unused
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
const struct arm64_cpu_capabilities *caps;
#define MIDR_ALL_VERSIONS(model) \ for (caps = entry->match_list; caps->matches; caps++)
.def_scope = SCOPE_LOCAL_CPU, \ if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
.matches = is_affected_midr_range, \ caps->cpu_enable)
.midr_model = model, \ caps->cpu_enable(caps);
.midr_range_min = 0, \ }
.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/*
* List of CPUs where we need to issue a psci call to
* harden the branch predictor.
*/
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
{},
};
static const struct midr_range qcom_bp_harden_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
{},
};
static const struct arm64_cpu_capabilities arm64_bp_harden_list[] = {
{
CAP_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
.cpu_enable = enable_smccc_arch_workaround_1,
},
{
CAP_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
.cpu_enable = qcom_enable_link_stack_sanitization,
},
{},
};
#endif
const struct arm64_cpu_capabilities arm64_errata[] = { const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \ #if defined(CONFIG_ARM64_ERRATUM_826319) || \
...@@ -250,8 +356,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -250,8 +356,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A53 r0p[012] */ /* Cortex-A53 r0p[012] */
.desc = "ARM errata 826319, 827319, 824069", .desc = "ARM errata 826319, 827319, 824069",
.capability = ARM64_WORKAROUND_CLEAN_CACHE, .capability = ARM64_WORKAROUND_CLEAN_CACHE,
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02), ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
.enable = cpu_enable_cache_maint_trap, .cpu_enable = cpu_enable_cache_maint_trap,
}, },
#endif #endif
#ifdef CONFIG_ARM64_ERRATUM_819472 #ifdef CONFIG_ARM64_ERRATUM_819472
...@@ -259,8 +365,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -259,8 +365,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A53 r0p[01] */ /* Cortex-A53 r0p[01] */
.desc = "ARM errata 819472", .desc = "ARM errata 819472",
.capability = ARM64_WORKAROUND_CLEAN_CACHE, .capability = ARM64_WORKAROUND_CLEAN_CACHE,
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01), ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
.enable = cpu_enable_cache_maint_trap, .cpu_enable = cpu_enable_cache_maint_trap,
}, },
#endif #endif
#ifdef CONFIG_ARM64_ERRATUM_832075 #ifdef CONFIG_ARM64_ERRATUM_832075
...@@ -268,9 +374,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -268,9 +374,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A57 r0p0 - r1p2 */ /* Cortex-A57 r0p0 - r1p2 */
.desc = "ARM erratum 832075", .desc = "ARM erratum 832075",
.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
MIDR_RANGE(MIDR_CORTEX_A57, ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
MIDR_CPU_VAR_REV(0, 0), 0, 0,
MIDR_CPU_VAR_REV(1, 2)), 1, 2),
}, },
#endif #endif
#ifdef CONFIG_ARM64_ERRATUM_834220 #ifdef CONFIG_ARM64_ERRATUM_834220
...@@ -278,9 +384,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -278,9 +384,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A57 r0p0 - r1p2 */ /* Cortex-A57 r0p0 - r1p2 */
.desc = "ARM erratum 834220", .desc = "ARM erratum 834220",
.capability = ARM64_WORKAROUND_834220, .capability = ARM64_WORKAROUND_834220,
MIDR_RANGE(MIDR_CORTEX_A57, ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
MIDR_CPU_VAR_REV(0, 0), 0, 0,
MIDR_CPU_VAR_REV(1, 2)), 1, 2),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
{
/* Cortex-A53 r0p[01234] */
.desc = "ARM erratum 843419",
.capability = ARM64_WORKAROUND_843419,
ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
MIDR_FIXED(0x4, BIT(8)),
}, },
#endif #endif
#ifdef CONFIG_ARM64_ERRATUM_845719 #ifdef CONFIG_ARM64_ERRATUM_845719
...@@ -288,7 +403,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -288,7 +403,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A53 r0p[01234] */ /* Cortex-A53 r0p[01234] */
.desc = "ARM erratum 845719", .desc = "ARM erratum 845719",
.capability = ARM64_WORKAROUND_845719, .capability = ARM64_WORKAROUND_845719,
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
}, },
#endif #endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154 #ifdef CONFIG_CAVIUM_ERRATUM_23154
...@@ -296,7 +411,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -296,7 +411,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cavium ThunderX, pass 1.x */ /* Cavium ThunderX, pass 1.x */
.desc = "Cavium erratum 23154", .desc = "Cavium erratum 23154",
.capability = ARM64_WORKAROUND_CAVIUM_23154, .capability = ARM64_WORKAROUND_CAVIUM_23154,
MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01), ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
}, },
#endif #endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456 #ifdef CONFIG_CAVIUM_ERRATUM_27456
...@@ -304,15 +419,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -304,15 +419,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cavium ThunderX, T88 pass 1.x - 2.1 */ /* Cavium ThunderX, T88 pass 1.x - 2.1 */
.desc = "Cavium erratum 27456", .desc = "Cavium erratum 27456",
.capability = ARM64_WORKAROUND_CAVIUM_27456, .capability = ARM64_WORKAROUND_CAVIUM_27456,
MIDR_RANGE(MIDR_THUNDERX, ERRATA_MIDR_RANGE(MIDR_THUNDERX,
MIDR_CPU_VAR_REV(0, 0), 0, 0,
MIDR_CPU_VAR_REV(1, 1)), 1, 1),
}, },
{ {
/* Cavium ThunderX, T81 pass 1.0 */ /* Cavium ThunderX, T81 pass 1.0 */
.desc = "Cavium erratum 27456", .desc = "Cavium erratum 27456",
.capability = ARM64_WORKAROUND_CAVIUM_27456, .capability = ARM64_WORKAROUND_CAVIUM_27456,
MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00), ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
}, },
#endif #endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115 #ifdef CONFIG_CAVIUM_ERRATUM_30115
...@@ -320,42 +435,41 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -320,42 +435,41 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cavium ThunderX, T88 pass 1.x - 2.2 */ /* Cavium ThunderX, T88 pass 1.x - 2.2 */
.desc = "Cavium erratum 30115", .desc = "Cavium erratum 30115",
.capability = ARM64_WORKAROUND_CAVIUM_30115, .capability = ARM64_WORKAROUND_CAVIUM_30115,
MIDR_RANGE(MIDR_THUNDERX, 0x00, ERRATA_MIDR_RANGE(MIDR_THUNDERX,
(1 << MIDR_VARIANT_SHIFT) | 2), 0, 0,
1, 2),
}, },
{ {
/* Cavium ThunderX, T81 pass 1.0 - 1.2 */ /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
.desc = "Cavium erratum 30115", .desc = "Cavium erratum 30115",
.capability = ARM64_WORKAROUND_CAVIUM_30115, .capability = ARM64_WORKAROUND_CAVIUM_30115,
MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02), ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
}, },
{ {
/* Cavium ThunderX, T83 pass 1.0 */ /* Cavium ThunderX, T83 pass 1.0 */
.desc = "Cavium erratum 30115", .desc = "Cavium erratum 30115",
.capability = ARM64_WORKAROUND_CAVIUM_30115, .capability = ARM64_WORKAROUND_CAVIUM_30115,
MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00), ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
}, },
#endif #endif
{ {
.desc = "Mismatched cache line size", .desc = "Mismatched cache line size",
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE, .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
.matches = has_mismatched_cache_line_size, .matches = has_mismatched_cache_line_size,
.def_scope = SCOPE_LOCAL_CPU, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.enable = cpu_enable_trap_ctr_access, .cpu_enable = cpu_enable_trap_ctr_access,
}, },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
{ {
.desc = "Qualcomm Technologies Falkor erratum 1003", .desc = "Qualcomm Technologies Falkor erratum 1003",
.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
MIDR_RANGE(MIDR_QCOM_FALKOR_V1, ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
MIDR_CPU_VAR_REV(0, 0),
MIDR_CPU_VAR_REV(0, 0)),
}, },
{ {
.desc = "Qualcomm Technologies Kryo erratum 1003", .desc = "Qualcomm Technologies Kryo erratum 1003",
.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
.def_scope = SCOPE_LOCAL_CPU, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.midr_model = MIDR_QCOM_KRYO, .midr_range.model = MIDR_QCOM_KRYO,
.matches = is_kryo_midr, .matches = is_kryo_midr,
}, },
#endif #endif
...@@ -363,9 +477,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -363,9 +477,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{ {
.desc = "Qualcomm Technologies Falkor erratum 1009", .desc = "Qualcomm Technologies Falkor erratum 1009",
.capability = ARM64_WORKAROUND_REPEAT_TLBI, .capability = ARM64_WORKAROUND_REPEAT_TLBI,
MIDR_RANGE(MIDR_QCOM_FALKOR_V1, ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
MIDR_CPU_VAR_REV(0, 0),
MIDR_CPU_VAR_REV(0, 0)),
}, },
#endif #endif
#ifdef CONFIG_ARM64_ERRATUM_858921 #ifdef CONFIG_ARM64_ERRATUM_858921
...@@ -373,92 +485,22 @@ const struct arm64_cpu_capabilities arm64_errata[] = { ...@@ -373,92 +485,22 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A73 all versions */ /* Cortex-A73 all versions */
.desc = "ARM erratum 858921", .desc = "ARM erratum 858921",
.capability = ARM64_WORKAROUND_858921, .capability = ARM64_WORKAROUND_858921,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
}, },
#endif #endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
{ {
.capability = ARM64_HARDEN_BRANCH_PREDICTOR, .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.enable = enable_smccc_arch_workaround_1, .matches = multi_entry_cap_matches,
}, .cpu_enable = multi_entry_cap_cpu_enable,
{ .match_list = arm64_bp_harden_list,
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
.enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
.enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
.enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
.enable = qcom_enable_link_stack_sanitization,
}, },
{ {
.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
.enable = qcom_enable_link_stack_sanitization,
},
{
.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
.enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
.enable = enable_smccc_arch_workaround_1,
}, },
#endif #endif
{ {
} }
}; };
/*
* The CPU Errata work arounds are detected and applied at boot time
* and the related information is freed soon after. If the new CPU requires
* an errata not detected at boot, fail this CPU.
*/
void verify_local_cpu_errata_workarounds(void)
{
const struct arm64_cpu_capabilities *caps = arm64_errata;
for (; caps->matches; caps++) {
if (cpus_have_cap(caps->capability)) {
if (caps->enable)
caps->enable((void *)caps);
} else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
pr_crit("CPU%d: Requires work around for %s, not detected"
" at boot time\n",
smp_processor_id(),
caps->desc ? : "an erratum");
cpu_die_early();
}
}
}
void update_cpu_errata_workarounds(void)
{
update_cpu_capabilities(arm64_errata, "enabling workaround for");
}
void __init enable_errata_workarounds(void)
{
enable_cpu_capabilities(arm64_errata);
}
...@@ -123,6 +123,7 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) ...@@ -123,6 +123,7 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
* sync with the documentation of the CPU feature register ABI. * sync with the documentation of the CPU feature register ABI.
*/ */
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
...@@ -148,6 +149,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { ...@@ -148,6 +149,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
...@@ -190,6 +192,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { ...@@ -190,6 +192,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
}; };
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
...@@ -199,12 +202,12 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { ...@@ -199,12 +202,12 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
}; };
static const struct arm64_ftr_bits ftr_ctr[] = { static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/* /*
* Linux can handle differing I-cache policies. Userspace JITs will * Linux can handle differing I-cache policies. Userspace JITs will
* make use of *minLine. * make use of *minLine.
...@@ -506,6 +509,9 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new) ...@@ -506,6 +509,9 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
reg->user_mask = user_mask; reg->user_mask = user_mask;
} }
extern const struct arm64_cpu_capabilities arm64_errata[];
static void __init setup_boot_cpu_capabilities(void);
void __init init_cpu_features(struct cpuinfo_arm64 *info) void __init init_cpu_features(struct cpuinfo_arm64 *info)
{ {
/* Before we start using the tables, make sure it is sorted */ /* Before we start using the tables, make sure it is sorted */
...@@ -548,6 +554,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) ...@@ -548,6 +554,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr); init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
sve_init_vq_map(); sve_init_vq_map();
} }
/*
* Detect and enable early CPU capabilities based on the boot CPU,
* after we have initialised the CPU feature infrastructure.
*/
setup_boot_cpu_capabilities();
} }
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new) static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
...@@ -826,11 +838,6 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _ ...@@ -826,11 +838,6 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
} }
static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
return is_kernel_in_hyp_mode();
}
static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry, static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
int __unused) int __unused)
{ {
...@@ -852,14 +859,30 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus ...@@ -852,14 +859,30 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
ID_AA64PFR0_FP_SHIFT) < 0; ID_AA64PFR0_FP_SHIFT) < 0;
} }
static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
int __unused)
{
return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT);
}
static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
int __unused)
{
return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT);
}
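Editor's note: the two CTR_EL0-derived capabilities above are what later allows cache maintenance for freshly written instructions to be elided. A hedged sketch of how a caller could consult them is shown below; the maintenance helper names are placeholders, not the in-tree routines.

/*
 * Sketch only: how code preparing executable memory might consult the new
 * capabilities. clean_dcache_pou()/invalidate_icache() are hypothetical
 * stand-ins for the arch cache maintenance routines.
 */
static void example_sync_icache(void *start, void *end)
{
	if (!cpus_have_const_cap(ARM64_HAS_CACHE_IDC))
		clean_dcache_pou(start, end);		/* D-cache clean to PoU still needed */

	if (!cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		invalidate_icache(start, end);		/* I-cache invalidation still needed */

	isb();						/* synchronise the instruction stream */
}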
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
int __unused) int scope)
{ {
/* List of CPUs that are not vulnerable and don't need KPTI */
static const struct midr_range kpti_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
};
char const *str = "command line option"; char const *str = "command line option";
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
/* /*
* For reasons that aren't entirely clear, enabling KPTI on Cavium * For reasons that aren't entirely clear, enabling KPTI on Cavium
...@@ -883,18 +906,15 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, ...@@ -883,18 +906,15 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
return true; return true;
/* Don't force KPTI for CPUs that are not vulnerable */ /* Don't force KPTI for CPUs that are not vulnerable */
switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) { if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
case MIDR_CAVIUM_THUNDERX2:
case MIDR_BRCM_VULCAN:
return false; return false;
}
/* Defer to CPU feature registers */ /* Defer to CPU feature registers */
return !cpuid_feature_extract_unsigned_field(pfr0, return !has_cpuid_feature(entry, scope);
ID_AA64PFR0_CSV3_SHIFT);
} }
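Editor's note: is_midr_in_range_list() comes from the new MIDR helpers added earlier in this series. A hedged sketch of the underlying per-range check is given below; the struct field names are assumptions for illustration (the real definitions live in <asm/cputype.h>).

/*
 * Hedged sketch of the per-range check behind is_midr_in_range_list().
 * Field names rv_min/rv_max are assumed; values are built with
 * MIDR_CPU_VAR_REV(), so comparing the masked MIDR directly works.
 */
static inline bool example_midr_in_range(u32 midr, const struct midr_range *range)
{
	if ((midr & MIDR_CPU_MODEL_MASK) != range->model)
		return false;

	/* Compare only the variant:revision part of the MIDR. */
	midr &= MIDR_VARIANT_MASK | MIDR_REVISION_MASK;
	return midr >= range->rv_min && midr <= range->rv_max;
}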
static int kpti_install_ng_mappings(void *__unused) static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{ {
typedef void (kpti_remap_fn)(int, int, phys_addr_t); typedef void (kpti_remap_fn)(int, int, phys_addr_t);
extern kpti_remap_fn idmap_kpti_install_ng_mappings; extern kpti_remap_fn idmap_kpti_install_ng_mappings;
...@@ -904,7 +924,7 @@ static int kpti_install_ng_mappings(void *__unused) ...@@ -904,7 +924,7 @@ static int kpti_install_ng_mappings(void *__unused)
int cpu = smp_processor_id(); int cpu = smp_processor_id();
if (kpti_applied) if (kpti_applied)
return 0; return;
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
...@@ -915,7 +935,7 @@ static int kpti_install_ng_mappings(void *__unused) ...@@ -915,7 +935,7 @@ static int kpti_install_ng_mappings(void *__unused)
if (!cpu) if (!cpu)
kpti_applied = true; kpti_applied = true;
return 0; return;
} }
static int __init parse_kpti(char *str) static int __init parse_kpti(char *str)
...@@ -932,7 +952,78 @@ static int __init parse_kpti(char *str) ...@@ -932,7 +952,78 @@ static int __init parse_kpti(char *str)
__setup("kpti=", parse_kpti); __setup("kpti=", parse_kpti);
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
static int cpu_copy_el2regs(void *__unused) #ifdef CONFIG_ARM64_HW_AFDBM
static inline void __cpu_enable_hw_dbm(void)
{
u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
write_sysreg(tcr, tcr_el1);
isb();
}
static bool cpu_has_broken_dbm(void)
{
/* List of CPUs which have broken DBM support. */
static const struct midr_range cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_1024718
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 - r1p0
#endif
{},
};
return is_midr_in_range_list(read_cpuid_id(), cpus);
}
static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
{
return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
!cpu_has_broken_dbm();
}
static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
{
if (cpu_can_use_dbm(cap))
__cpu_enable_hw_dbm();
}
static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
int __unused)
{
static bool detected = false;
/*
* DBM is a non-conflicting feature, i.e. the kernel can safely
* run a mix of CPUs with and without the feature. So, we
* unconditionally enable the capability to allow any late CPU
* to use the feature. We only enable the control bits on the
* CPU if it actually supports the feature.
*
* We have to make sure we print the "feature" detection only
* when at least one CPU actually uses it. So check if this CPU
* can actually use it and print the message exactly once.
*
* This is safe as all CPUs (including secondary CPUs - due to the
* LOCAL_CPU scope - and the hotplugged CPUs - via verification)
* go through the "matches" check exactly once. Also, if a CPU
* matches the criteria, it is guaranteed that the CPU will turn
* the DBM on, as the capability is unconditionally enabled.
*/
if (!detected && cpu_can_use_dbm(cap)) {
detected = true;
pr_info("detected: Hardware dirty bit management\n");
}
return true;
}
#endif
#ifdef CONFIG_ARM64_VHE
static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
return is_kernel_in_hyp_mode();
}
static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
{ {
/* /*
* Copy register values that aren't redirected by hardware. * Copy register values that aren't redirected by hardware.
...@@ -944,15 +1035,14 @@ static int cpu_copy_el2regs(void *__unused) ...@@ -944,15 +1035,14 @@ static int cpu_copy_el2regs(void *__unused)
*/ */
if (!alternatives_applied) if (!alternatives_applied)
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
return 0;
} }
#endif
static const struct arm64_cpu_capabilities arm64_features[] = { static const struct arm64_cpu_capabilities arm64_features[] = {
{ {
.desc = "GIC system register CPU interface", .desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF, .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_useable_gicv3_cpuif, .matches = has_useable_gicv3_cpuif,
.sys_reg = SYS_ID_AA64PFR0_EL1, .sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_GIC_SHIFT, .field_pos = ID_AA64PFR0_GIC_SHIFT,
...@@ -963,20 +1053,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ...@@ -963,20 +1053,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{ {
.desc = "Privileged Access Never", .desc = "Privileged Access Never",
.capability = ARM64_HAS_PAN, .capability = ARM64_HAS_PAN,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature, .matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR1_EL1, .sys_reg = SYS_ID_AA64MMFR1_EL1,
.field_pos = ID_AA64MMFR1_PAN_SHIFT, .field_pos = ID_AA64MMFR1_PAN_SHIFT,
.sign = FTR_UNSIGNED, .sign = FTR_UNSIGNED,
.min_field_value = 1, .min_field_value = 1,
.enable = cpu_enable_pan, .cpu_enable = cpu_enable_pan,
}, },
#endif /* CONFIG_ARM64_PAN */ #endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
{ {
.desc = "LSE atomic instructions", .desc = "LSE atomic instructions",
.capability = ARM64_HAS_LSE_ATOMICS, .capability = ARM64_HAS_LSE_ATOMICS,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature, .matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1, .sys_reg = SYS_ID_AA64ISAR0_EL1,
.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT, .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
...@@ -987,14 +1077,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ...@@ -987,14 +1077,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{ {
.desc = "Software prefetching using PRFM", .desc = "Software prefetching using PRFM",
.capability = ARM64_HAS_NO_HW_PREFETCH, .capability = ARM64_HAS_NO_HW_PREFETCH,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
.matches = has_no_hw_prefetch, .matches = has_no_hw_prefetch,
}, },
#ifdef CONFIG_ARM64_UAO #ifdef CONFIG_ARM64_UAO
{ {
.desc = "User Access Override", .desc = "User Access Override",
.capability = ARM64_HAS_UAO, .capability = ARM64_HAS_UAO,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature, .matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR2_EL1, .sys_reg = SYS_ID_AA64MMFR2_EL1,
.field_pos = ID_AA64MMFR2_UAO_SHIFT, .field_pos = ID_AA64MMFR2_UAO_SHIFT,
...@@ -1008,21 +1098,23 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ...@@ -1008,21 +1098,23 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
#ifdef CONFIG_ARM64_PAN #ifdef CONFIG_ARM64_PAN
{ {
.capability = ARM64_ALT_PAN_NOT_UAO, .capability = ARM64_ALT_PAN_NOT_UAO,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = cpufeature_pan_not_uao, .matches = cpufeature_pan_not_uao,
}, },
#endif /* CONFIG_ARM64_PAN */ #endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_VHE
{ {
.desc = "Virtualization Host Extensions", .desc = "Virtualization Host Extensions",
.capability = ARM64_HAS_VIRT_HOST_EXTN, .capability = ARM64_HAS_VIRT_HOST_EXTN,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
.matches = runs_at_el2, .matches = runs_at_el2,
.enable = cpu_copy_el2regs, .cpu_enable = cpu_copy_el2regs,
}, },
#endif /* CONFIG_ARM64_VHE */
{ {
.desc = "32-bit EL0 Support", .desc = "32-bit EL0 Support",
.capability = ARM64_HAS_32BIT_EL0, .capability = ARM64_HAS_32BIT_EL0,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature, .matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR0_EL1, .sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED, .sign = FTR_UNSIGNED,
...@@ -1032,22 +1124,30 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ...@@ -1032,22 +1124,30 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{ {
.desc = "Reduced HYP mapping offset", .desc = "Reduced HYP mapping offset",
.capability = ARM64_HYP_OFFSET_LOW, .capability = ARM64_HYP_OFFSET_LOW,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = hyp_offset_low, .matches = hyp_offset_low,
}, },
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
{ {
.desc = "Kernel page table isolation (KPTI)", .desc = "Kernel page table isolation (KPTI)",
.capability = ARM64_UNMAP_KERNEL_AT_EL0, .capability = ARM64_UNMAP_KERNEL_AT_EL0,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
/*
* The ID feature fields below are used to indicate that
* the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
* more details.
*/
.sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_CSV3_SHIFT,
.min_field_value = 1,
.matches = unmap_kernel_at_el0, .matches = unmap_kernel_at_el0,
.enable = kpti_install_ng_mappings, .cpu_enable = kpti_install_ng_mappings,
}, },
#endif #endif
{ {
/* FP/SIMD is not implemented */ /* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD, .capability = ARM64_HAS_NO_FPSIMD,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.min_field_value = 0, .min_field_value = 0,
.matches = has_no_fpsimd, .matches = has_no_fpsimd,
}, },
...@@ -1055,7 +1155,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ...@@ -1055,7 +1155,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{ {
.desc = "Data cache clean to Point of Persistence", .desc = "Data cache clean to Point of Persistence",
.capability = ARM64_HAS_DCPOP, .capability = ARM64_HAS_DCPOP,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature, .matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR1_EL1, .sys_reg = SYS_ID_AA64ISAR1_EL1,
.field_pos = ID_AA64ISAR1_DPB_SHIFT, .field_pos = ID_AA64ISAR1_DPB_SHIFT,
...@@ -1065,42 +1165,74 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ...@@ -1065,42 +1165,74 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
#ifdef CONFIG_ARM64_SVE #ifdef CONFIG_ARM64_SVE
{ {
.desc = "Scalable Vector Extension", .desc = "Scalable Vector Extension",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SVE, .capability = ARM64_SVE,
.def_scope = SCOPE_SYSTEM,
.sys_reg = SYS_ID_AA64PFR0_EL1, .sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED, .sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_SVE_SHIFT, .field_pos = ID_AA64PFR0_SVE_SHIFT,
.min_field_value = ID_AA64PFR0_SVE, .min_field_value = ID_AA64PFR0_SVE,
.matches = has_cpuid_feature, .matches = has_cpuid_feature,
.enable = sve_kernel_enable, .cpu_enable = sve_kernel_enable,
}, },
#endif /* CONFIG_ARM64_SVE */ #endif /* CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_RAS_EXTN #ifdef CONFIG_ARM64_RAS_EXTN
{ {
.desc = "RAS Extension Support", .desc = "RAS Extension Support",
.capability = ARM64_HAS_RAS_EXTN, .capability = ARM64_HAS_RAS_EXTN,
.def_scope = SCOPE_SYSTEM, .type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature, .matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR0_EL1, .sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED, .sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_RAS_SHIFT, .field_pos = ID_AA64PFR0_RAS_SHIFT,
.min_field_value = ID_AA64PFR0_RAS_V1, .min_field_value = ID_AA64PFR0_RAS_V1,
.enable = cpu_clear_disr, .cpu_enable = cpu_clear_disr,
}, },
#endif /* CONFIG_ARM64_RAS_EXTN */ #endif /* CONFIG_ARM64_RAS_EXTN */
{
.desc = "Data cache clean to the PoU not required for I/D coherence",
.capability = ARM64_HAS_CACHE_IDC,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cache_idc,
},
{
.desc = "Instruction cache invalidation not required for I/D coherence",
.capability = ARM64_HAS_CACHE_DIC,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cache_dic,
},
#ifdef CONFIG_ARM64_HW_AFDBM
{
/*
* Since we turn this on always, we don't want the user to
* think that the feature is available when it may not be.
* So hide the description.
*
* .desc = "Hardware pagetable Dirty Bit Management",
*
*/
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
.capability = ARM64_HW_DBM,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64MMFR1_HADBS_SHIFT,
.min_field_value = 2,
.matches = has_hw_dbm,
.cpu_enable = cpu_enable_hw_dbm,
},
#endif
{}, {},
}; };
#define HWCAP_CAP(reg, field, s, min_value, type, cap) \ #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
{ \ { \
.desc = #cap, \ .desc = #cap, \
.def_scope = SCOPE_SYSTEM, \ .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
.matches = has_cpuid_feature, \ .matches = has_cpuid_feature, \
.sys_reg = reg, \ .sys_reg = reg, \
.field_pos = field, \ .field_pos = field, \
.sign = s, \ .sign = s, \
.min_field_value = min_value, \ .min_field_value = min_value, \
.hwcap_type = type, \ .hwcap_type = cap_type, \
.hwcap = cap, \ .hwcap = cap, \
} }
...@@ -1118,14 +1250,18 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { ...@@ -1118,14 +1250,18 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM), HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC), HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE #ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE), HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
#endif #endif
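Editor's note: the new HWCAP_CAP entries above export the TS, DIT, LRCPC v2 and AT features to userspace. A small userspace example follows, assuming kernel/libc headers new enough to define the corresponding HWCAP_* constants.

/*
 * Userspace sketch: query the newly exported hwcaps via the auxiliary vector.
 * Assumes <asm/hwcap.h> (or a sufficiently new libc) defines these HWCAP_* bits.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	if (hwcaps & HWCAP_FLAGM)
		printf("flagm:  flag manipulation instructions (TS)\n");
	if (hwcaps & HWCAP_DIT)
		printf("dit:    data independent timing\n");
	if (hwcaps & HWCAP_ILRCPC)
		printf("ilrcpc: LDAPUR/STLUR instructions (LRCPC v2)\n");
	if (hwcaps & HWCAP_USCAT)
		printf("uscat:  unaligned single-copy atomicity (AT)\n");
	return 0;
}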
...@@ -1193,7 +1329,7 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps) ...@@ -1193,7 +1329,7 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
/* We support emulation of accesses to CPU ID feature registers */ /* We support emulation of accesses to CPU ID feature registers */
elf_hwcap |= HWCAP_CPUID; elf_hwcap |= HWCAP_CPUID;
for (; hwcaps->matches; hwcaps++) for (; hwcaps->matches; hwcaps++)
if (hwcaps->matches(hwcaps, hwcaps->def_scope)) if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
cap_set_elf_hwcap(hwcaps); cap_set_elf_hwcap(hwcaps);
} }
...@@ -1210,17 +1346,19 @@ static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array, ...@@ -1210,17 +1346,19 @@ static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
return false; return false;
for (caps = cap_array; caps->matches; caps++) for (caps = cap_array; caps->matches; caps++)
if (caps->capability == cap && if (caps->capability == cap)
caps->matches(caps, SCOPE_LOCAL_CPU)) return caps->matches(caps, SCOPE_LOCAL_CPU);
return true;
return false; return false;
} }
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info) u16 scope_mask, const char *info)
{ {
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
for (; caps->matches; caps++) { for (; caps->matches; caps++) {
if (!caps->matches(caps, caps->def_scope)) if (!(caps->type & scope_mask) ||
!caps->matches(caps, cpucap_default_scope(caps)))
continue; continue;
if (!cpus_have_cap(caps->capability) && caps->desc) if (!cpus_have_cap(caps->capability) && caps->desc)
...@@ -1229,41 +1367,145 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, ...@@ -1229,41 +1367,145 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
} }
} }
static void update_cpu_capabilities(u16 scope_mask)
{
__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
__update_cpu_capabilities(arm64_errata, scope_mask,
"enabling workaround for");
}
static int __enable_cpu_capability(void *arg)
{
const struct arm64_cpu_capabilities *cap = arg;
cap->cpu_enable(cap);
return 0;
}
/* /*
* Run through the enabled capabilities and enable() it on all active * Run through the enabled capabilities and enable() it on all active
* CPUs * CPUs
*/ */
void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) static void __init
__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
u16 scope_mask)
{ {
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
for (; caps->matches; caps++) { for (; caps->matches; caps++) {
unsigned int num = caps->capability; unsigned int num = caps->capability;
if (!cpus_have_cap(num)) if (!(caps->type & scope_mask) || !cpus_have_cap(num))
continue; continue;
/* Ensure cpus_have_const_cap(num) works */ /* Ensure cpus_have_const_cap(num) works */
static_branch_enable(&cpu_hwcap_keys[num]); static_branch_enable(&cpu_hwcap_keys[num]);
if (caps->enable) { if (caps->cpu_enable) {
/* /*
* Use stop_machine() as it schedules the work allowing * Capabilities with SCOPE_BOOT_CPU scope are finalised
* us to modify PSTATE, instead of on_each_cpu() which * before any secondary CPU boots. Thus, each secondary
* uses an IPI, giving us a PSTATE that disappears when * will enable the capability as appropriate via
* we return. * check_local_cpu_capabilities(). The only exception is
* the boot CPU, for which the capability must be
* enabled here. This approach avoids costly
* stop_machine() calls for this case.
*
* Otherwise, use stop_machine() as it schedules the
* work allowing us to modify PSTATE, instead of
* on_each_cpu() which uses an IPI, giving us a PSTATE
* that disappears when we return.
*/ */
stop_machine(caps->enable, (void *)caps, cpu_online_mask); if (scope_mask & SCOPE_BOOT_CPU)
caps->cpu_enable(caps);
else
stop_machine(__enable_cpu_capability,
(void *)caps, cpu_online_mask);
} }
} }
} }
static void __init enable_cpu_capabilities(u16 scope_mask)
{
__enable_cpu_capabilities(arm64_features, scope_mask);
__enable_cpu_capabilities(arm64_errata, scope_mask);
}
/*
* Run through the list of capabilities to check for conflicts.
* If the system has already detected a capability, take necessary
* action on this CPU.
*
* Returns "false" on conflicts.
*/
static bool
__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
u16 scope_mask)
{
bool cpu_has_cap, system_has_cap;
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
for (; caps->matches; caps++) {
if (!(caps->type & scope_mask))
continue;
cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
system_has_cap = cpus_have_cap(caps->capability);
if (system_has_cap) {
/*
* Check if the new CPU misses an advertised feature,
* which is not safe to miss.
*/
if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
break;
/*
* We have to issue cpu_enable() irrespective of
* whether the CPU has it or not, as it is enabled
* system wide. It is up to the callback to take
* appropriate action on this CPU.
*/
if (caps->cpu_enable)
caps->cpu_enable(caps);
} else {
/*
* Check if the CPU has this capability if it isn't
* safe to have when the system doesn't.
*/
if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
break;
}
}
if (caps->matches) {
pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
smp_processor_id(), caps->capability,
caps->desc, system_has_cap, cpu_has_cap);
return false;
}
return true;
}
static bool verify_local_cpu_caps(u16 scope_mask)
{
return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
__verify_local_cpu_caps(arm64_features, scope_mask);
}
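Editor's note: the conflict rules above reduce to a small decision table. A hedged restatement as a standalone predicate, for exposition only (not kernel code):

/*
 * Exposition-only restatement of the conflict rules in
 * __verify_local_cpu_caps(): a late CPU is compatible unless it misses a
 * required system capability, or brings one the system cannot accept.
 */
static bool example_late_cpu_compatible(bool system_has_cap, bool cpu_has_cap,
					bool late_cpu_optional, bool late_cpu_permitted)
{
	if (system_has_cap)
		/* Missing an advertised capability is only OK if it is optional for late CPUs. */
		return cpu_has_cap || late_cpu_optional;

	/* Having an extra capability is only OK if late CPUs are permitted to have it. */
	return !cpu_has_cap || late_cpu_permitted;
}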
/* /*
* Check for CPU features that are used in early boot * Check for CPU features that are used in early boot
* based on the Boot CPU value. * based on the Boot CPU value.
*/ */
static void check_early_cpu_features(void) static void check_early_cpu_features(void)
{ {
verify_cpu_run_el();
verify_cpu_asid_bits(); verify_cpu_asid_bits();
/*
* Early features are used by the kernel already. If there
* is a conflict, we cannot proceed further.
*/
if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
cpu_panic_kernel();
} }
static void static void
...@@ -1278,27 +1520,6 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps) ...@@ -1278,27 +1520,6 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
} }
} }
static void
verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
{
const struct arm64_cpu_capabilities *caps = caps_list;
for (; caps->matches; caps++) {
if (!cpus_have_cap(caps->capability))
continue;
/*
* If the new CPU misses an advertised feature, we cannot proceed
* further, park the cpu.
*/
if (!__this_cpu_has_cap(caps_list, caps->capability)) {
pr_crit("CPU%d: missing feature: %s\n",
smp_processor_id(), caps->desc);
cpu_die_early();
}
if (caps->enable)
caps->enable((void *)caps);
}
}
static void verify_sve_features(void) static void verify_sve_features(void)
{ {
u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
...@@ -1316,6 +1537,7 @@ static void verify_sve_features(void) ...@@ -1316,6 +1537,7 @@ static void verify_sve_features(void)
/* Add checks on other ZCR bits here if necessary */ /* Add checks on other ZCR bits here if necessary */
} }
/* /*
* Run through the enabled system capabilities and enable() it on this CPU. * Run through the enabled system capabilities and enable() it on this CPU.
* The capabilities were decided based on the available CPUs at the boot time. * The capabilities were decided based on the available CPUs at the boot time.
...@@ -1326,8 +1548,14 @@ static void verify_sve_features(void) ...@@ -1326,8 +1548,14 @@ static void verify_sve_features(void)
*/ */
static void verify_local_cpu_capabilities(void) static void verify_local_cpu_capabilities(void)
{ {
verify_local_cpu_errata_workarounds(); /*
verify_local_cpu_features(arm64_features); * The capabilities with SCOPE_BOOT_CPU are checked from
* check_early_cpu_features(), as they need to be verified
* on all secondary CPUs.
*/
if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
cpu_die_early();
verify_local_elf_hwcaps(arm64_elf_hwcaps); verify_local_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0()) if (system_supports_32bit_el0())
...@@ -1335,9 +1563,6 @@ static void verify_local_cpu_capabilities(void) ...@@ -1335,9 +1563,6 @@ static void verify_local_cpu_capabilities(void)
if (system_supports_sve()) if (system_supports_sve())
verify_sve_features(); verify_sve_features();
if (system_uses_ttbr0_pan())
pr_info("Emulating Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
} }
void check_local_cpu_capabilities(void) void check_local_cpu_capabilities(void)
...@@ -1350,20 +1575,22 @@ void check_local_cpu_capabilities(void) ...@@ -1350,20 +1575,22 @@ void check_local_cpu_capabilities(void)
/* /*
* If we haven't finalised the system capabilities, this CPU gets * If we haven't finalised the system capabilities, this CPU gets
* a chance to update the errata work arounds. * a chance to update the errata work arounds and local features.
* Otherwise, this CPU should verify that it has all the system * Otherwise, this CPU should verify that it has all the system
* advertised capabilities. * advertised capabilities.
*/ */
if (!sys_caps_initialised) if (!sys_caps_initialised)
update_cpu_errata_workarounds(); update_cpu_capabilities(SCOPE_LOCAL_CPU);
else else
verify_local_cpu_capabilities(); verify_local_cpu_capabilities();
} }
static void __init setup_feature_capabilities(void) static void __init setup_boot_cpu_capabilities(void)
{ {
update_cpu_capabilities(arm64_features, "detected feature:"); /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
enable_cpu_capabilities(arm64_features); update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
/* Enable the SCOPE_BOOT_CPU capabilities alone right away */
enable_cpu_capabilities(SCOPE_BOOT_CPU);
} }
DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready); DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
...@@ -1382,20 +1609,33 @@ bool this_cpu_has_cap(unsigned int cap) ...@@ -1382,20 +1609,33 @@ bool this_cpu_has_cap(unsigned int cap)
__this_cpu_has_cap(arm64_errata, cap)); __this_cpu_has_cap(arm64_errata, cap));
} }
static void __init setup_system_capabilities(void)
{
/*
* Now that we have finalised the system-wide safe feature
* registers, finalise the capabilities that depend on them.
* Also enable any available capabilities that are not already
* enabled.
*/
update_cpu_capabilities(SCOPE_SYSTEM);
enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
}
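Editor's note: putting the pieces together, the detection/enable ordering introduced by this rework can be summarised as below. This is an illustration of the call sequence defined in this file, not literal kernel flow control.

/*
 * Illustration of the boot-time ordering after this rework:
 */
static void example_capability_flow(void)
{
	/* Boot CPU, from init_cpu_features(): detect SCOPE_BOOT_CPU and
	 * SCOPE_LOCAL_CPU capabilities, enable the SCOPE_BOOT_CPU ones. */
	setup_boot_cpu_capabilities();

	/* Each CPU brought up before sys_caps_initialised contributes its
	 * SCOPE_LOCAL_CPU capabilities; CPUs arriving later are verified instead. */
	check_local_cpu_capabilities();

	/* Once all boot-time CPUs are up, from setup_cpu_features(): finalise
	 * SCOPE_SYSTEM capabilities and enable everything not yet enabled. */
	setup_system_capabilities();
}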
void __init setup_cpu_features(void) void __init setup_cpu_features(void)
{ {
u32 cwg; u32 cwg;
int cls; int cls;
/* Set the CPU feature capabilies */ setup_system_capabilities();
setup_feature_capabilities();
enable_errata_workarounds();
mark_const_caps_ready(); mark_const_caps_ready();
setup_elf_hwcaps(arm64_elf_hwcaps); setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0()) if (system_supports_32bit_el0())
setup_elf_hwcaps(compat_elf_hwcaps); setup_elf_hwcaps(compat_elf_hwcaps);
if (system_uses_ttbr0_pan())
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
sve_setup(); sve_setup();
/* Advertise that we have computed the system capabilities */ /* Advertise that we have computed the system capabilities */
...@@ -1518,10 +1758,8 @@ static int __init enable_mrs_emulation(void) ...@@ -1518,10 +1758,8 @@ static int __init enable_mrs_emulation(void)
core_initcall(enable_mrs_emulation); core_initcall(enable_mrs_emulation);
int cpu_clear_disr(void *__unused) void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
{ {
/* Firmware may have left a deferred SError in this register. */ /* Firmware may have left a deferred SError in this register. */
write_sysreg_s(0, SYS_DISR_EL1); write_sysreg_s(0, SYS_DISR_EL1);
return 0;
} }
...@@ -77,6 +77,10 @@ static const char *const hwcap_str[] = { ...@@ -77,6 +77,10 @@ static const char *const hwcap_str[] = {
"sha512", "sha512",
"sve", "sve",
"asimdfhm", "asimdfhm",
"dit",
"uscat",
"ilrcpc",
"flagm",
NULL NULL
}; };
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <asm/daifflags.h> #include <asm/daifflags.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/system_misc.h> #include <asm/system_misc.h>
#include <asm/traps.h>
/* Determine debug architecture. */ /* Determine debug architecture. */
u8 debug_monitors_arch(void) u8 debug_monitors_arch(void)
...@@ -223,7 +224,7 @@ static void send_user_sigtrap(int si_code) ...@@ -223,7 +224,7 @@ static void send_user_sigtrap(int si_code)
if (interrupts_enabled(regs)) if (interrupts_enabled(regs))
local_irq_enable(); local_irq_enable();
force_sig_info(SIGTRAP, &info, current); arm64_force_sig_info(&info, "User debug trap", current);
} }
static int single_step_handler(unsigned long addr, unsigned int esr, static int single_step_handler(unsigned long addr, unsigned int esr,
......
...@@ -39,7 +39,9 @@ ...@@ -39,7 +39,9 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/sysctl.h> #include <linux/sysctl.h>
#include <asm/esr.h>
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/simd.h> #include <asm/simd.h>
#include <asm/sigcontext.h> #include <asm/sigcontext.h>
...@@ -64,7 +66,7 @@ ...@@ -64,7 +66,7 @@
* been loaded into its FPSIMD registers most recently, or whether it has * been loaded into its FPSIMD registers most recently, or whether it has
* been used to perform kernel mode NEON in the meantime. * been used to perform kernel mode NEON in the meantime.
* *
* For (a), we add a 'cpu' field to struct fpsimd_state, which gets updated to * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
* the id of the current CPU every time the state is loaded onto a CPU. For (b), * the id of the current CPU every time the state is loaded onto a CPU. For (b),
* we add the per-cpu variable 'fpsimd_last_state' (below), which contains the * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
* address of the userland FPSIMD state of the task that was loaded onto the CPU * address of the userland FPSIMD state of the task that was loaded onto the CPU
...@@ -73,7 +75,7 @@ ...@@ -73,7 +75,7 @@
* With this in place, we no longer have to restore the next FPSIMD state right * With this in place, we no longer have to restore the next FPSIMD state right
* when switching between tasks. Instead, we can defer this check to userland * when switching between tasks. Instead, we can defer this check to userland
* resume, at which time we verify whether the CPU's fpsimd_last_state and the * resume, at which time we verify whether the CPU's fpsimd_last_state and the
* task's fpsimd_state.cpu are still mutually in sync. If this is the case, we * task's fpsimd_cpu are still mutually in sync. If this is the case, we
* can omit the FPSIMD restore. * can omit the FPSIMD restore.
* *
* As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
...@@ -90,14 +92,14 @@ ...@@ -90,14 +92,14 @@
* flag with local_bh_disable() unless softirqs are already masked. * flag with local_bh_disable() unless softirqs are already masked.
* *
* For a certain task, the sequence may look something like this: * For a certain task, the sequence may look something like this:
* - the task gets scheduled in; if both the task's fpsimd_state.cpu field * - the task gets scheduled in; if both the task's fpsimd_cpu field
* contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu * contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
* variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is * variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
* cleared, otherwise it is set; * cleared, otherwise it is set;
* *
* - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
* userland FPSIMD state is copied from memory to the registers, the task's * userland FPSIMD state is copied from memory to the registers, the task's
* fpsimd_state.cpu field is set to the id of the current CPU, the current * fpsimd_cpu field is set to the id of the current CPU, the current
* CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the * CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
* TIF_FOREIGN_FPSTATE flag is cleared; * TIF_FOREIGN_FPSTATE flag is cleared;
* *
...@@ -115,7 +117,7 @@ ...@@ -115,7 +117,7 @@
* whatever is in the FPSIMD registers is not saved to memory, but discarded. * whatever is in the FPSIMD registers is not saved to memory, but discarded.
*/ */
struct fpsimd_last_state_struct { struct fpsimd_last_state_struct {
struct fpsimd_state *st; struct user_fpsimd_state *st;
bool sve_in_use; bool sve_in_use;
}; };
...@@ -222,7 +224,7 @@ static void sve_user_enable(void) ...@@ -222,7 +224,7 @@ static void sve_user_enable(void)
* sets TIF_SVE. * sets TIF_SVE.
* *
* When stored, FPSIMD registers V0-V31 are encoded in * When stored, FPSIMD registers V0-V31 are encoded in
* task->fpsimd_state; bits [max : 128] for each of Z0-Z31 are * task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
* logically zero but not stored anywhere; P0-P15 and FFR are not * logically zero but not stored anywhere; P0-P15 and FFR are not
* stored and have unspecified values from userspace's point of * stored and have unspecified values from userspace's point of
* view. For hygiene purposes, the kernel zeroes them on next use, * view. For hygiene purposes, the kernel zeroes them on next use,
...@@ -231,9 +233,9 @@ static void sve_user_enable(void) ...@@ -231,9 +233,9 @@ static void sve_user_enable(void)
* task->thread.sve_state does not need to be non-NULL, valid or any * task->thread.sve_state does not need to be non-NULL, valid or any
* particular size: it must not be dereferenced. * particular size: it must not be dereferenced.
* *
* * FPSR and FPCR are always stored in task->fpsimd_state irrespctive of * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
* whether TIF_SVE is clear or set, since these are not vector length * irrespective of whether TIF_SVE is clear or set, since these are
* dependent. * not vector length dependent.
*/ */
/* /*
...@@ -251,10 +253,10 @@ static void task_fpsimd_load(void) ...@@ -251,10 +253,10 @@ static void task_fpsimd_load(void)
if (system_supports_sve() && test_thread_flag(TIF_SVE)) if (system_supports_sve() && test_thread_flag(TIF_SVE))
sve_load_state(sve_pffr(current), sve_load_state(sve_pffr(current),
&current->thread.fpsimd_state.fpsr, &current->thread.uw.fpsimd_state.fpsr,
sve_vq_from_vl(current->thread.sve_vl) - 1); sve_vq_from_vl(current->thread.sve_vl) - 1);
else else
fpsimd_load_state(&current->thread.fpsimd_state); fpsimd_load_state(&current->thread.uw.fpsimd_state);
if (system_supports_sve()) { if (system_supports_sve()) {
/* Toggle SVE trapping for userspace if needed */ /* Toggle SVE trapping for userspace if needed */
...@@ -285,15 +287,14 @@ static void task_fpsimd_save(void) ...@@ -285,15 +287,14 @@ static void task_fpsimd_save(void)
* re-enter user with corrupt state. * re-enter user with corrupt state.
* There's no way to recover, so kill it: * There's no way to recover, so kill it:
*/ */
force_signal_inject( force_signal_inject(SIGKILL, SI_KERNEL, 0);
SIGKILL, 0, current_pt_regs(), 0);
return; return;
} }
sve_save_state(sve_pffr(current), sve_save_state(sve_pffr(current),
&current->thread.fpsimd_state.fpsr); &current->thread.uw.fpsimd_state.fpsr);
} else } else
fpsimd_save_state(&current->thread.fpsimd_state); fpsimd_save_state(&current->thread.uw.fpsimd_state);
} }
} }
...@@ -404,20 +405,21 @@ static int __init sve_sysctl_init(void) { return 0; } ...@@ -404,20 +405,21 @@ static int __init sve_sysctl_init(void) { return 0; }
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
/* /*
* Transfer the FPSIMD state in task->thread.fpsimd_state to * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
* task->thread.sve_state. * task->thread.sve_state.
* *
* Task can be a non-runnable task, or current. In the latter case, * Task can be a non-runnable task, or current. In the latter case,
* softirqs (and preemption) must be disabled. * softirqs (and preemption) must be disabled.
* task->thread.sve_state must point to at least sve_state_size(task) * task->thread.sve_state must point to at least sve_state_size(task)
* bytes of allocated kernel memory. * bytes of allocated kernel memory.
* task->thread.fpsimd_state must be up to date before calling this function. * task->thread.uw.fpsimd_state must be up to date before calling this
* function.
*/ */
static void fpsimd_to_sve(struct task_struct *task) static void fpsimd_to_sve(struct task_struct *task)
{ {
unsigned int vq; unsigned int vq;
void *sst = task->thread.sve_state; void *sst = task->thread.sve_state;
struct fpsimd_state const *fst = &task->thread.fpsimd_state; struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
unsigned int i; unsigned int i;
if (!system_supports_sve()) if (!system_supports_sve())
...@@ -431,7 +433,7 @@ static void fpsimd_to_sve(struct task_struct *task) ...@@ -431,7 +433,7 @@ static void fpsimd_to_sve(struct task_struct *task)
/* /*
* Transfer the SVE state in task->thread.sve_state to * Transfer the SVE state in task->thread.sve_state to
* task->thread.fpsimd_state. * task->thread.uw.fpsimd_state.
* *
* Task can be a non-runnable task, or current. In the latter case, * Task can be a non-runnable task, or current. In the latter case,
* softirqs (and preemption) must be disabled. * softirqs (and preemption) must be disabled.
...@@ -443,7 +445,7 @@ static void sve_to_fpsimd(struct task_struct *task) ...@@ -443,7 +445,7 @@ static void sve_to_fpsimd(struct task_struct *task)
{ {
unsigned int vq; unsigned int vq;
void const *sst = task->thread.sve_state; void const *sst = task->thread.sve_state;
struct fpsimd_state *fst = &task->thread.fpsimd_state; struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
unsigned int i; unsigned int i;
if (!system_supports_sve()) if (!system_supports_sve())
...@@ -510,7 +512,7 @@ void fpsimd_sync_to_sve(struct task_struct *task) ...@@ -510,7 +512,7 @@ void fpsimd_sync_to_sve(struct task_struct *task)
} }
/* /*
* Ensure that task->thread.fpsimd_state is up to date with respect to * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
* the user task, irrespective of whether SVE is in use or not. * the user task, irrespective of whether SVE is in use or not.
* *
* This should only be called by ptrace. task must be non-runnable. * This should only be called by ptrace. task must be non-runnable.
...@@ -525,21 +527,21 @@ void sve_sync_to_fpsimd(struct task_struct *task) ...@@ -525,21 +527,21 @@ void sve_sync_to_fpsimd(struct task_struct *task)
/* /*
* Ensure that task->thread.sve_state is up to date with respect to * Ensure that task->thread.sve_state is up to date with respect to
* the task->thread.fpsimd_state. * the task->thread.uw.fpsimd_state.
* *
* This should only be called by ptrace to merge new FPSIMD register * This should only be called by ptrace to merge new FPSIMD register
* values into a task for which SVE is currently active. * values into a task for which SVE is currently active.
* task must be non-runnable. * task must be non-runnable.
* task->thread.sve_state must point to at least sve_state_size(task) * task->thread.sve_state must point to at least sve_state_size(task)
* bytes of allocated kernel memory. * bytes of allocated kernel memory.
* task->thread.fpsimd_state must already have been initialised with * task->thread.uw.fpsimd_state must already have been initialised with
* the new FPSIMD register values to be merged in. * the new FPSIMD register values to be merged in.
*/ */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task) void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{ {
unsigned int vq; unsigned int vq;
void *sst = task->thread.sve_state; void *sst = task->thread.sve_state;
struct fpsimd_state const *fst = &task->thread.fpsimd_state; struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
unsigned int i; unsigned int i;
if (!test_tsk_thread_flag(task, TIF_SVE)) if (!test_tsk_thread_flag(task, TIF_SVE))
...@@ -757,12 +759,10 @@ static void __init sve_efi_setup(void) ...@@ -757,12 +759,10 @@ static void __init sve_efi_setup(void)
* Enable SVE for EL1. * Enable SVE for EL1.
* Intended for use by the cpufeatures code during CPU boot. * Intended for use by the cpufeatures code during CPU boot.
*/ */
int sve_kernel_enable(void *__always_unused p) void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{ {
write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1); write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
isb(); isb();
return 0;
} }
void __init sve_setup(void) void __init sve_setup(void)
...@@ -831,7 +831,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) ...@@ -831,7 +831,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{ {
/* Even if we chose not to use SVE, the hardware could still trap: */ /* Even if we chose not to use SVE, the hardware could still trap: */
if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
return; return;
} }
...@@ -867,18 +867,20 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) ...@@ -867,18 +867,20 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{ {
siginfo_t info; siginfo_t info;
unsigned int si_code = FPE_FIXME; unsigned int si_code = FPE_FLTUNK;
if (esr & FPEXC_IOF) if (esr & ESR_ELx_FP_EXC_TFV) {
si_code = FPE_FLTINV; if (esr & FPEXC_IOF)
else if (esr & FPEXC_DZF) si_code = FPE_FLTINV;
si_code = FPE_FLTDIV; else if (esr & FPEXC_DZF)
else if (esr & FPEXC_OFF) si_code = FPE_FLTDIV;
si_code = FPE_FLTOVF; else if (esr & FPEXC_OFF)
else if (esr & FPEXC_UFF) si_code = FPE_FLTOVF;
si_code = FPE_FLTUND; else if (esr & FPEXC_UFF)
else if (esr & FPEXC_IXF) si_code = FPE_FLTUND;
si_code = FPE_FLTRES; else if (esr & FPEXC_IXF)
si_code = FPE_FLTRES;
}
memset(&info, 0, sizeof(info)); memset(&info, 0, sizeof(info));
info.si_signo = SIGFPE; info.si_signo = SIGFPE;
...@@ -908,10 +910,9 @@ void fpsimd_thread_switch(struct task_struct *next) ...@@ -908,10 +910,9 @@ void fpsimd_thread_switch(struct task_struct *next)
* the TIF_FOREIGN_FPSTATE flag so the state will be loaded * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
* upon the next return to userland. * upon the next return to userland.
*/ */
struct fpsimd_state *st = &next->thread.fpsimd_state; if (__this_cpu_read(fpsimd_last_state.st) ==
&next->thread.uw.fpsimd_state
if (__this_cpu_read(fpsimd_last_state.st) == st && next->thread.fpsimd_cpu == smp_processor_id())
&& st->cpu == smp_processor_id())
clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE); clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
else else
set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE); set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
...@@ -927,7 +928,8 @@ void fpsimd_flush_thread(void) ...@@ -927,7 +928,8 @@ void fpsimd_flush_thread(void)
local_bh_disable(); local_bh_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); memset(&current->thread.uw.fpsimd_state, 0,
sizeof(current->thread.uw.fpsimd_state));
fpsimd_flush_task_state(current); fpsimd_flush_task_state(current);
if (system_supports_sve()) { if (system_supports_sve()) {
...@@ -986,7 +988,7 @@ void fpsimd_preserve_current_state(void) ...@@ -986,7 +988,7 @@ void fpsimd_preserve_current_state(void)
/* /*
* Like fpsimd_preserve_current_state(), but ensure that * Like fpsimd_preserve_current_state(), but ensure that
* current->thread.fpsimd_state is updated so that it can be copied to * current->thread.uw.fpsimd_state is updated so that it can be copied to
* the signal frame. * the signal frame.
*/ */
void fpsimd_signal_preserve_current_state(void) void fpsimd_signal_preserve_current_state(void)
...@@ -1004,11 +1006,10 @@ static void fpsimd_bind_to_cpu(void) ...@@ -1004,11 +1006,10 @@ static void fpsimd_bind_to_cpu(void)
{ {
struct fpsimd_last_state_struct *last = struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state); this_cpu_ptr(&fpsimd_last_state);
struct fpsimd_state *st = &current->thread.fpsimd_state;
last->st = st; last->st = &current->thread.uw.fpsimd_state;
last->sve_in_use = test_thread_flag(TIF_SVE); last->sve_in_use = test_thread_flag(TIF_SVE);
st->cpu = smp_processor_id(); current->thread.fpsimd_cpu = smp_processor_id();
} }
/* /*
...@@ -1043,7 +1044,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) ...@@ -1043,7 +1044,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
local_bh_disable(); local_bh_disable();
current->thread.fpsimd_state.user_fpsimd = *state; current->thread.uw.fpsimd_state = *state;
if (system_supports_sve() && test_thread_flag(TIF_SVE)) if (system_supports_sve() && test_thread_flag(TIF_SVE))
fpsimd_to_sve(current); fpsimd_to_sve(current);
...@@ -1060,7 +1061,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) ...@@ -1060,7 +1061,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
*/ */
void fpsimd_flush_task_state(struct task_struct *t) void fpsimd_flush_task_state(struct task_struct *t)
{ {
t->thread.fpsimd_state.cpu = NR_CPUS; t->thread.fpsimd_cpu = NR_CPUS;
} }
static inline void fpsimd_flush_cpu_state(void) static inline void fpsimd_flush_cpu_state(void)
...@@ -1159,7 +1160,7 @@ EXPORT_SYMBOL(kernel_neon_end); ...@@ -1159,7 +1160,7 @@ EXPORT_SYMBOL(kernel_neon_end);
#ifdef CONFIG_EFI #ifdef CONFIG_EFI
static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state); static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used); static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used); static DEFINE_PER_CPU(bool, efi_sve_state_used);
......
...@@ -117,53 +117,42 @@ u64 __init kaslr_early_init(u64 dt_phys) ...@@ -117,53 +117,42 @@ u64 __init kaslr_early_init(u64 dt_phys)
/* /*
* OK, so we are proceeding with KASLR enabled. Calculate a suitable * OK, so we are proceeding with KASLR enabled. Calculate a suitable
* kernel image offset from the seed. Let's place the kernel in the * kernel image offset from the seed. Let's place the kernel in the
* lower half of the VMALLOC area (VA_BITS - 2). * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
* the lower and upper quarters to avoid colliding with other
* allocations.
* Even if we could randomize at page granularity for 16k and 64k pages, * Even if we could randomize at page granularity for 16k and 64k pages,
* let's always round to 2 MB so we don't interfere with the ability to * let's always round to 2 MB so we don't interfere with the ability to
* map using contiguous PTEs * map using contiguous PTEs
*/ */
mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1); mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
offset = seed & mask; offset = BIT(VA_BITS - 3) + (seed & mask);
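 Plugging in VA_BITS == 48 gives a quick sanity check of the new placement (illustrative arithmetic only, not part of the patch):

	/* VA_BITS == 48: mask covers [0, 2^46) rounded down to 2 MiB */
	mask   = ((1UL << 46) - 1) & ~(SZ_2M - 1);  /* 0x3fffffe00000      */
	offset = BIT(45) + (seed & mask);           /* 32 TiB .. < 96 TiB  */
	/* i.e. the middle half of a 128 TiB window, always 2 MiB aligned */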
/* use the top 16 bits to randomize the linear region */ /* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48; memstart_offset_seed = seed >> 48;
/*
* The kernel Image should not extend across a 1GB/32MB/512MB alignment
* boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
* happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT).
*
* NOTE: The references to _text and _end below will already take the
* modulo offset (the physical displacement modulo 2 MB) into
* account, given that the physical placement is controlled by
* the loader, and will not change as a result of the virtual
* mapping we choose.
*/
if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
(((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT);
if (IS_ENABLED(CONFIG_KASAN)) if (IS_ENABLED(CONFIG_KASAN))
/* /*
* KASAN does not expect the module region to intersect the * KASAN does not expect the module region to intersect the
* vmalloc region, since shadow memory is allocated for each * vmalloc region, since shadow memory is allocated for each
* module at load time, whereas the vmalloc region is shadowed * module at load time, whereas the vmalloc region is shadowed
* by KASAN zero pages. So keep modules out of the vmalloc * by KASAN zero pages. So keep modules out of the vmalloc
* region if KASAN is enabled. * region if KASAN is enabled, and put the kernel well within
* 4 GB of the module region.
*/ */
return offset; return offset % SZ_2G;
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
/* /*
* Randomize the module region independently from the core * Randomize the module region over a 4 GB window covering the
* kernel. This prevents modules from leaking any information * kernel. This reduces the risk of modules leaking information
* about the address of the kernel itself, but results in * about the address of the kernel itself, but results in
* branches between modules and the core kernel that are * branches between modules and the core kernel that are
* resolved via PLTs. (Branches between modules will be * resolved via PLTs. (Branches between modules will be
* resolved normally.) * resolved normally.)
*/ */
module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE; module_range = SZ_4G - (u64)(_end - _stext);
module_alloc_base = VMALLOC_START; module_alloc_base = max((u64)_end + offset - SZ_4G,
(u64)MODULES_VADDR);
} else { } else {
/* /*
* Randomize the module region by setting module_alloc_base to * Randomize the module region by setting module_alloc_base to
......
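 A quick check of the window produced by the two assignments in the CONFIG_RANDOMIZE_MODULE_REGION_FULL branch above (illustrative, ignoring the MODULES_VADDR clamp): the lowest module address is _end + offset - SZ_4G and the highest sits just below _stext + offset, so modules always land within 4 GiB below the randomized kernel image. That keeps ADRP-based references (range +/- 4 GiB) to kernel symbols in range, while branches beyond +/- 128 MiB still go through PLTs, as the comment notes.

	/* window sketch:
	 *   base         = _end + offset - SZ_4G
	 *   base + range = base + SZ_4G - (_end - _stext) = _stext + offset
	 */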
...@@ -138,14 +138,25 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) ...@@ -138,14 +138,25 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{ {
struct pt_regs *thread_regs; struct cpu_context *cpu_context = &task->thread.cpu_context;
/* Initialize to zero */ /* Initialize to zero */
memset((char *)gdb_regs, 0, NUMREGBYTES); memset((char *)gdb_regs, 0, NUMREGBYTES);
thread_regs = task_pt_regs(task);
memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES); gdb_regs[19] = cpu_context->x19;
/* Special case for PSTATE (check comments in asm/kgdb.h for details) */ gdb_regs[20] = cpu_context->x20;
dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs); gdb_regs[21] = cpu_context->x21;
gdb_regs[22] = cpu_context->x22;
gdb_regs[23] = cpu_context->x23;
gdb_regs[24] = cpu_context->x24;
gdb_regs[25] = cpu_context->x25;
gdb_regs[26] = cpu_context->x26;
gdb_regs[27] = cpu_context->x27;
gdb_regs[28] = cpu_context->x28;
gdb_regs[29] = cpu_context->fp;
gdb_regs[31] = cpu_context->sp;
gdb_regs[32] = cpu_context->pc;
} }
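 The switch to cpu_context above is worth a note: for a task that is not currently running, only the callee-saved registers survive in thread.cpu_context, which is why the remaining gdb_regs slots (x0-x18, x30, pstate) are simply left zeroed by the memset. For orientation, the structure being read looks roughly like this (as declared in arch/arm64/include/asm/processor.h):

	struct cpu_context {
		unsigned long x19, x20, x21, x22, x23;
		unsigned long x24, x25, x26, x27, x28;
		unsigned long fp;	/* x29 */
		unsigned long sp;
		unsigned long pc;
	};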
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
......
...@@ -36,11 +36,53 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, ...@@ -36,11 +36,53 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
return (u64)&plt[i - 1]; return (u64)&plt[i - 1];
pltsec->plt_num_entries++; pltsec->plt_num_entries++;
BUG_ON(pltsec->plt_num_entries > pltsec->plt_max_entries); if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
return 0;
return (u64)&plt[i]; return (u64)&plt[i];
} }
#ifdef CONFIG_ARM64_ERRATUM_843419
u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;
struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
int i = pltsec->plt_num_entries++;
u32 mov0, mov1, mov2, br;
int rd;
if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
return 0;
/* get the destination register of the ADRP instruction */
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
le32_to_cpup((__le32 *)loc));
/* generate the veneer instructions */
mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_INVERSE);
mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_KEEP);
mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_KEEP);
br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
AARCH64_INSN_BRANCH_NOLINK);
plt[i] = (struct plt_entry){
cpu_to_le32(mov0),
cpu_to_le32(mov1),
cpu_to_le32(mov2),
cpu_to_le32(br)
};
return (u64)&plt[i];
}
#endif
#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b)) #define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
static int cmp_rela(const void *a, const void *b) static int cmp_rela(const void *a, const void *b)
...@@ -68,16 +110,21 @@ static bool duplicate_rel(const Elf64_Rela *rela, int num) ...@@ -68,16 +110,21 @@ static bool duplicate_rel(const Elf64_Rela *rela, int num)
} }
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num, static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
Elf64_Word dstidx) Elf64_Word dstidx, Elf_Shdr *dstsec)
{ {
unsigned int ret = 0; unsigned int ret = 0;
Elf64_Sym *s; Elf64_Sym *s;
int i; int i;
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
u64 min_align;
switch (ELF64_R_TYPE(rela[i].r_info)) { switch (ELF64_R_TYPE(rela[i].r_info)) {
case R_AARCH64_JUMP26: case R_AARCH64_JUMP26:
case R_AARCH64_CALL26: case R_AARCH64_CALL26:
if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
break;
/* /*
* We only have to consider branch targets that resolve * We only have to consider branch targets that resolve
* to symbols that are defined in a different section. * to symbols that are defined in a different section.
...@@ -109,6 +156,41 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num, ...@@ -109,6 +156,41 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
if (rela[i].r_addend != 0 || !duplicate_rel(rela, i)) if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
ret++; ret++;
break; break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
case R_AARCH64_ADR_PREL_PG_HI21:
if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
!cpus_have_const_cap(ARM64_WORKAROUND_843419))
break;
/*
* Determine the minimal safe alignment for this ADRP
* instruction: the section alignment at which it is
* guaranteed not to appear at a vulnerable offset.
*
* This comes down to finding the least significant zero
* bit in bits [11:3] of the section offset, and
* increasing the section's alignment so that the
* resulting address of this instruction is guaranteed
* to equal the offset in that particular bit (as well
 * as all less significant bits). This ensures that the * as all less significant bits). This ensures that the
 * address modulo 4 KB != 0xff8 or 0xffc (which would * address modulo 4 KB != 0xff8 or 0xffc (which would
* have all ones in bits [11:3])
*/
min_align = 2ULL << ffz(rela[i].r_offset | 0x7);
/*
* Allocate veneer space for each ADRP that may appear
* at a vulnerable offset nonetheless. At relocation
* time, some of these will remain unused since some
* ADRP instructions can be patched to ADR instructions
* instead.
*/
if (min_align > SZ_4K)
ret++;
else
dstsec->sh_addralign = max(dstsec->sh_addralign,
min_align);
break;
} }
} }
return ret; return ret;
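 Two worked examples of the min_align computation in the new R_AARCH64_ADR_PREL_PG_HI21 case above (illustrative offsets, not part of the patch):

	/* r_offset = 0x1008: 0x1008 | 0x7 = 0x100f, ffz() = bit 4,
	 *   min_align = 2 << 4 = 32; a 32-byte section alignment keeps
	 *   bit 4 of the ADRP's address clear, so bits [11:3] can never
	 *   be all ones and the instruction cannot land at 0xff8/0xffc.
	 * r_offset = 0x2ff8: 0x2ff8 | 0x7 = 0x2fff, ffz() = bit 12,
	 *   min_align = 2 << 12 = 8K > 4K; no alignment can help, so a
	 *   veneer slot is reserved instead.
	 */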
...@@ -166,10 +248,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, ...@@ -166,10 +248,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0) if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
core_plts += count_plts(syms, rels, numrels, core_plts += count_plts(syms, rels, numrels,
sechdrs[i].sh_info); sechdrs[i].sh_info, dstsec);
else else
init_plts += count_plts(syms, rels, numrels, init_plts += count_plts(syms, rels, numrels,
sechdrs[i].sh_info); sechdrs[i].sh_info, dstsec);
} }
mod->arch.core.plt->sh_type = SHT_NOBITS; mod->arch.core.plt->sh_type = SHT_NOBITS;
......
...@@ -55,9 +55,10 @@ void *module_alloc(unsigned long size) ...@@ -55,9 +55,10 @@ void *module_alloc(unsigned long size)
* less likely that the module region gets exhausted, so we * less likely that the module region gets exhausted, so we
* can simply omit this fallback in that case. * can simply omit this fallback in that case.
*/ */
p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, module_alloc_base + SZ_4G, GFP_KERNEL,
NUMA_NO_NODE, __builtin_return_address(0)); PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (p && (kasan_module_alloc(p, size) < 0)) { if (p && (kasan_module_alloc(p, size) < 0)) {
vfree(p); vfree(p);
...@@ -197,6 +198,34 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val, ...@@ -197,6 +198,34 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
return 0; return 0;
} }
static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
{
u32 insn;
if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
!cpus_have_const_cap(ARM64_WORKAROUND_843419) ||
((u64)place & 0xfff) < 0xff8)
return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
AARCH64_INSN_IMM_ADR);
/* patch ADRP to ADR if it is in range */
if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
AARCH64_INSN_IMM_ADR)) {
insn = le32_to_cpu(*place);
insn &= ~BIT(31);
} else {
/* out of range for ADR -> emit a veneer */
val = module_emit_adrp_veneer(mod, place, val & ~0xfff);
if (!val)
return -ENOEXEC;
insn = aarch64_insn_gen_branch_imm((u64)place, val,
AARCH64_INSN_BRANCH_NOLINK);
}
*place = cpu_to_le32(insn);
return 0;
}
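 A brief note on the insn &= ~BIT(31) step above (background, not part of the patch): in the A64 encoding ADR and ADRP differ only in bit 31 (0 for ADR, 1 for ADRP), so once the immediate has been re-encoded as a byte offset, clearing that bit turns the page-relative ADRP into a plain PC-relative ADR. That is only possible when the target is within ADR's +/- 1 MiB reach; otherwise the code falls back to emitting a veneer and branching to it.

	/* illustrative: shared encoding of ADR (op == 0) and ADRP (op == 1)
	 *   <op:1> <immlo:2> 1 0 0 0 0 <immhi:19> <Rd:5>
	 */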
int apply_relocate_add(Elf64_Shdr *sechdrs, int apply_relocate_add(Elf64_Shdr *sechdrs,
const char *strtab, const char *strtab,
unsigned int symindex, unsigned int symindex,
...@@ -336,14 +365,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, ...@@ -336,14 +365,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
AARCH64_INSN_IMM_ADR); AARCH64_INSN_IMM_ADR);
break; break;
#ifndef CONFIG_ARM64_ERRATUM_843419
case R_AARCH64_ADR_PREL_PG_HI21_NC: case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false; overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21: case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, ovf = reloc_insn_adrp(me, loc, val);
AARCH64_INSN_IMM_ADR); if (ovf && ovf != -ERANGE)
return ovf;
break; break;
#endif
case R_AARCH64_ADD_ABS_LO12_NC: case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC: case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false; overflow_check = false;
...@@ -386,6 +414,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, ...@@ -386,6 +414,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
ovf == -ERANGE) { ovf == -ERANGE) {
val = module_emit_plt_entry(me, loc, &rel[i], sym); val = module_emit_plt_entry(me, loc, &rel[i], sym);
if (!val)
return -ENOEXEC;
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
26, AARCH64_INSN_IMM_26); 26, AARCH64_INSN_IMM_26);
} }
......
...@@ -257,7 +257,7 @@ static void tls_thread_flush(void) ...@@ -257,7 +257,7 @@ static void tls_thread_flush(void)
write_sysreg(0, tpidr_el0); write_sysreg(0, tpidr_el0);
if (is_compat_task()) { if (is_compat_task()) {
current->thread.tp_value = 0; current->thread.uw.tp_value = 0;
/* /*
* We need to ensure ordering between the shadow state and the * We need to ensure ordering between the shadow state and the
...@@ -351,7 +351,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, ...@@ -351,7 +351,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
* for the new thread. * for the new thread.
*/ */
if (clone_flags & CLONE_SETTLS) if (clone_flags & CLONE_SETTLS)
p->thread.tp_value = childregs->regs[3]; p->thread.uw.tp_value = childregs->regs[3];
} else { } else {
memset(childregs, 0, sizeof(struct pt_regs)); memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h; childregs->pstate = PSR_MODE_EL1h;
...@@ -379,7 +379,7 @@ static void tls_thread_switch(struct task_struct *next) ...@@ -379,7 +379,7 @@ static void tls_thread_switch(struct task_struct *next)
tls_preserve_current_state(); tls_preserve_current_state();
if (is_compat_thread(task_thread_info(next))) if (is_compat_thread(task_thread_info(next)))
write_sysreg(next->thread.tp_value, tpidrro_el0); write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
else if (!arm64_kernel_unmapped_at_el0()) else if (!arm64_kernel_unmapped_at_el0())
write_sysreg(0, tpidrro_el0); write_sysreg(0, tpidrro_el0);
......
...@@ -209,7 +209,7 @@ static void ptrace_hbptriggered(struct perf_event *bp, ...@@ -209,7 +209,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger); force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger);
} }
#endif #endif
force_sig_info(SIGTRAP, &info, current); arm64_force_sig_info(&info, "Hardware breakpoint trap (ptrace)", current);
} }
/* /*
...@@ -629,7 +629,7 @@ static int __fpr_get(struct task_struct *target, ...@@ -629,7 +629,7 @@ static int __fpr_get(struct task_struct *target,
sve_sync_to_fpsimd(target); sve_sync_to_fpsimd(target);
uregs = &target->thread.fpsimd_state.user_fpsimd; uregs = &target->thread.uw.fpsimd_state;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
start_pos, start_pos + sizeof(*uregs)); start_pos, start_pos + sizeof(*uregs));
...@@ -655,19 +655,19 @@ static int __fpr_set(struct task_struct *target, ...@@ -655,19 +655,19 @@ static int __fpr_set(struct task_struct *target,
struct user_fpsimd_state newstate; struct user_fpsimd_state newstate;
/* /*
* Ensure target->thread.fpsimd_state is up to date, so that a * Ensure target->thread.uw.fpsimd_state is up to date, so that a
* short copyin can't resurrect stale data. * short copyin can't resurrect stale data.
*/ */
sve_sync_to_fpsimd(target); sve_sync_to_fpsimd(target);
newstate = target->thread.fpsimd_state.user_fpsimd; newstate = target->thread.uw.fpsimd_state;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
start_pos, start_pos + sizeof(newstate)); start_pos, start_pos + sizeof(newstate));
if (ret) if (ret)
return ret; return ret;
target->thread.fpsimd_state.user_fpsimd = newstate; target->thread.uw.fpsimd_state = newstate;
return ret; return ret;
} }
...@@ -692,7 +692,7 @@ static int tls_get(struct task_struct *target, const struct user_regset *regset, ...@@ -692,7 +692,7 @@ static int tls_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf) void *kbuf, void __user *ubuf)
{ {
unsigned long *tls = &target->thread.tp_value; unsigned long *tls = &target->thread.uw.tp_value;
if (target == current) if (target == current)
tls_preserve_current_state(); tls_preserve_current_state();
...@@ -705,13 +705,13 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, ...@@ -705,13 +705,13 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf) const void *kbuf, const void __user *ubuf)
{ {
int ret; int ret;
unsigned long tls = target->thread.tp_value; unsigned long tls = target->thread.uw.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret) if (ret)
return ret; return ret;
target->thread.tp_value = tls; target->thread.uw.tp_value = tls;
return ret; return ret;
} }
...@@ -842,7 +842,7 @@ static int sve_get(struct task_struct *target, ...@@ -842,7 +842,7 @@ static int sve_get(struct task_struct *target,
start = end; start = end;
end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fpsimd_state.fpsr, &target->thread.uw.fpsimd_state.fpsr,
start, end); start, end);
if (ret) if (ret)
return ret; return ret;
...@@ -941,7 +941,7 @@ static int sve_set(struct task_struct *target, ...@@ -941,7 +941,7 @@ static int sve_set(struct task_struct *target,
start = end; start = end;
end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpsimd_state.fpsr, &target->thread.uw.fpsimd_state.fpsr,
start, end); start, end);
out: out:
...@@ -1169,7 +1169,7 @@ static int compat_vfp_get(struct task_struct *target, ...@@ -1169,7 +1169,7 @@ static int compat_vfp_get(struct task_struct *target,
compat_ulong_t fpscr; compat_ulong_t fpscr;
int ret, vregs_end_pos; int ret, vregs_end_pos;
uregs = &target->thread.fpsimd_state.user_fpsimd; uregs = &target->thread.uw.fpsimd_state;
if (target == current) if (target == current)
fpsimd_preserve_current_state(); fpsimd_preserve_current_state();
...@@ -1202,7 +1202,7 @@ static int compat_vfp_set(struct task_struct *target, ...@@ -1202,7 +1202,7 @@ static int compat_vfp_set(struct task_struct *target,
compat_ulong_t fpscr; compat_ulong_t fpscr;
int ret, vregs_end_pos; int ret, vregs_end_pos;
uregs = &target->thread.fpsimd_state.user_fpsimd; uregs = &target->thread.uw.fpsimd_state;
vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
...@@ -1225,7 +1225,7 @@ static int compat_tls_get(struct task_struct *target, ...@@ -1225,7 +1225,7 @@ static int compat_tls_get(struct task_struct *target,
const struct user_regset *regset, unsigned int pos, const struct user_regset *regset, unsigned int pos,
unsigned int count, void *kbuf, void __user *ubuf) unsigned int count, void *kbuf, void __user *ubuf)
{ {
compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value; compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
} }
...@@ -1235,13 +1235,13 @@ static int compat_tls_set(struct task_struct *target, ...@@ -1235,13 +1235,13 @@ static int compat_tls_set(struct task_struct *target,
const void __user *ubuf) const void __user *ubuf)
{ {
int ret; int ret;
compat_ulong_t tls = target->thread.tp_value; compat_ulong_t tls = target->thread.uw.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret) if (ret)
return ret; return ret;
target->thread.tp_value = tls; target->thread.uw.tp_value = tls;
return ret; return ret;
} }
...@@ -1538,7 +1538,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, ...@@ -1538,7 +1538,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break; break;
case COMPAT_PTRACE_GET_THREAD_AREA: case COMPAT_PTRACE_GET_THREAD_AREA:
ret = put_user((compat_ulong_t)child->thread.tp_value, ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
(compat_ulong_t __user *)datap); (compat_ulong_t __user *)datap);
break; break;
......
...@@ -28,6 +28,7 @@ asmlinkage u64 absolute_data16(void); ...@@ -28,6 +28,7 @@ asmlinkage u64 absolute_data16(void);
asmlinkage u64 signed_movw(void); asmlinkage u64 signed_movw(void);
asmlinkage u64 unsigned_movw(void); asmlinkage u64 unsigned_movw(void);
asmlinkage u64 relative_adrp(void); asmlinkage u64 relative_adrp(void);
asmlinkage u64 relative_adrp_far(void);
asmlinkage u64 relative_adr(void); asmlinkage u64 relative_adr(void);
asmlinkage u64 relative_data64(void); asmlinkage u64 relative_data64(void);
asmlinkage u64 relative_data32(void); asmlinkage u64 relative_data32(void);
...@@ -43,9 +44,8 @@ static struct { ...@@ -43,9 +44,8 @@ static struct {
{ "R_AARCH64_ABS16", absolute_data16, UL(SYM16_ABS_VAL) }, { "R_AARCH64_ABS16", absolute_data16, UL(SYM16_ABS_VAL) },
{ "R_AARCH64_MOVW_SABS_Gn", signed_movw, UL(SYM64_ABS_VAL) }, { "R_AARCH64_MOVW_SABS_Gn", signed_movw, UL(SYM64_ABS_VAL) },
{ "R_AARCH64_MOVW_UABS_Gn", unsigned_movw, UL(SYM64_ABS_VAL) }, { "R_AARCH64_MOVW_UABS_Gn", unsigned_movw, UL(SYM64_ABS_VAL) },
#ifndef CONFIG_ARM64_ERRATUM_843419
{ "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp, (u64)&sym64_rel }, { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp, (u64)&sym64_rel },
#endif { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp_far, (u64)&memstart_addr },
{ "R_AARCH64_ADR_PREL_LO21", relative_adr, (u64)&sym64_rel }, { "R_AARCH64_ADR_PREL_LO21", relative_adr, (u64)&sym64_rel },
{ "R_AARCH64_PREL64", relative_data64, (u64)&sym64_rel }, { "R_AARCH64_PREL64", relative_data64, (u64)&sym64_rel },
{ "R_AARCH64_PREL32", relative_data32, (u64)&sym64_rel }, { "R_AARCH64_PREL32", relative_data32, (u64)&sym64_rel },
......
...@@ -43,15 +43,21 @@ ENTRY(unsigned_movw) ...@@ -43,15 +43,21 @@ ENTRY(unsigned_movw)
ret ret
ENDPROC(unsigned_movw) ENDPROC(unsigned_movw)
#ifndef CONFIG_ARM64_ERRATUM_843419 .align 12
.space 0xff8
ENTRY(relative_adrp) ENTRY(relative_adrp)
adrp x0, sym64_rel adrp x0, sym64_rel
add x0, x0, #:lo12:sym64_rel add x0, x0, #:lo12:sym64_rel
ret ret
ENDPROC(relative_adrp) ENDPROC(relative_adrp)
#endif .align 12
.space 0xffc
ENTRY(relative_adrp_far)
adrp x0, memstart_addr
add x0, x0, #:lo12:memstart_addr
ret
ENDPROC(relative_adrp_far)
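 A note on the .align 12 / .space padding added above: it deliberately places the ADRP of relative_adrp at page offset 0xff8 and that of relative_adrp_far at 0xffc, i.e. exactly the offsets affected by erratum 843419. The near case (sym64_rel, inside the module) should be patched to an ADR, while the far case (memstart_addr, in the core kernel) should go through the new veneer path, so both branches of reloc_insn_adrp() get exercised by the self-test.

	/* offsets exercised (illustrative):
	 *   relative_adrp      at page offset 0xff8 -> ADRP patched to ADR
	 *   relative_adrp_far  at page offset 0xffc -> branch to veneer
	 */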
ENTRY(relative_adr) ENTRY(relative_adr)
adr x0, sym64_rel adr x0, sym64_rel
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/signal32.h> #include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h> #include <asm/vdso.h>
/* /*
...@@ -179,7 +180,7 @@ static void __user *apply_user_offset( ...@@ -179,7 +180,7 @@ static void __user *apply_user_offset(
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{ {
struct user_fpsimd_state const *fpsimd = struct user_fpsimd_state const *fpsimd =
&current->thread.fpsimd_state.user_fpsimd; &current->thread.uw.fpsimd_state;
int err; int err;
/* copy the FP and status/control registers */ /* copy the FP and status/control registers */
...@@ -565,11 +566,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) ...@@ -565,11 +566,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
return regs->regs[0]; return regs->regs[0];
badframe: badframe:
if (show_unhandled_signals) arm64_notify_segfault(regs->sp);
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__,
regs->pc, regs->sp);
force_sig(SIGSEGV, current);
return 0; return 0;
} }
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <asm/esr.h> #include <asm/esr.h>
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/signal32.h> #include <asm/signal32.h>
#include <asm/traps.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/unistd.h> #include <asm/unistd.h>
...@@ -149,7 +150,7 @@ union __fpsimd_vreg { ...@@ -149,7 +150,7 @@ union __fpsimd_vreg {
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{ {
struct user_fpsimd_state const *fpsimd = struct user_fpsimd_state const *fpsimd =
&current->thread.fpsimd_state.user_fpsimd; &current->thread.uw.fpsimd_state;
compat_ulong_t magic = VFP_MAGIC; compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE; compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr, fpexc; compat_ulong_t fpscr, fpexc;
...@@ -307,11 +308,7 @@ asmlinkage int compat_sys_sigreturn(struct pt_regs *regs) ...@@ -307,11 +308,7 @@ asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
return regs->regs[0]; return regs->regs[0];
badframe: badframe:
if (show_unhandled_signals) arm64_notify_segfault(regs->compat_sp);
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__,
regs->pc, regs->compat_sp);
force_sig(SIGSEGV, current);
return 0; return 0;
} }
...@@ -344,11 +341,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs) ...@@ -344,11 +341,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
return regs->regs[0]; return regs->regs[0];
badframe: badframe:
if (show_unhandled_signals) arm64_notify_segfault(regs->compat_sp);
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__,
regs->pc, regs->compat_sp);
force_sig(SIGSEGV, current);
return 0; return 0;
} }
......
...@@ -85,43 +85,6 @@ enum ipi_msg_type { ...@@ -85,43 +85,6 @@ enum ipi_msg_type {
IPI_WAKEUP IPI_WAKEUP
}; };
#ifdef CONFIG_ARM64_VHE
/* Whether the boot CPU is running in HYP mode or not*/
static bool boot_cpu_hyp_mode;
static inline void save_boot_cpu_run_el(void)
{
boot_cpu_hyp_mode = is_kernel_in_hyp_mode();
}
static inline bool is_boot_cpu_in_hyp_mode(void)
{
return boot_cpu_hyp_mode;
}
/*
* Verify that a secondary CPU is running the kernel at the same
* EL as that of the boot CPU.
*/
void verify_cpu_run_el(void)
{
bool in_el2 = is_kernel_in_hyp_mode();
bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode();
if (in_el2 ^ boot_cpu_el2) {
pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n",
smp_processor_id(),
in_el2 ? 2 : 1,
boot_cpu_el2 ? 2 : 1);
cpu_panic_kernel();
}
}
#else
static inline void save_boot_cpu_run_el(void) {}
#endif
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu); static int op_cpu_kill(unsigned int cpu);
#else #else
...@@ -447,13 +410,6 @@ void __init smp_prepare_boot_cpu(void) ...@@ -447,13 +410,6 @@ void __init smp_prepare_boot_cpu(void)
*/ */
jump_label_init(); jump_label_init();
cpuinfo_store_boot_cpu(); cpuinfo_store_boot_cpu();
save_boot_cpu_run_el();
/*
* Run the errata work around checks on the boot CPU, once we have
* initialised the cpu feature infrastructure from
* cpuinfo_store_boot_cpu() above.
*/
update_cpu_errata_workarounds();
} }
static u64 __init of_get_cpu_mpidr(struct device_node *dn) static u64 __init of_get_cpu_mpidr(struct device_node *dn)
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/system_misc.h>
#include <asm/unistd.h> #include <asm/unistd.h>
static long static long
...@@ -67,6 +68,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags) ...@@ -67,6 +68,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
*/ */
long compat_arm_syscall(struct pt_regs *regs) long compat_arm_syscall(struct pt_regs *regs)
{ {
siginfo_t info;
unsigned int no = regs->regs[7]; unsigned int no = regs->regs[7];
switch (no) { switch (no) {
...@@ -88,7 +90,7 @@ long compat_arm_syscall(struct pt_regs *regs) ...@@ -88,7 +90,7 @@ long compat_arm_syscall(struct pt_regs *regs)
return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
case __ARM_NR_compat_set_tls: case __ARM_NR_compat_set_tls:
current->thread.tp_value = regs->regs[0]; current->thread.uw.tp_value = regs->regs[0];
/* /*
* Protect against register corruption from context switch. * Protect against register corruption from context switch.
...@@ -99,6 +101,23 @@ long compat_arm_syscall(struct pt_regs *regs) ...@@ -99,6 +101,23 @@ long compat_arm_syscall(struct pt_regs *regs)
return 0; return 0;
default: default:
return -ENOSYS; /*
* Calls 9f00xx..9f07ff are defined to return -ENOSYS
* if not implemented, rather than raising SIGILL. This
* way the calling program can gracefully determine whether
* a feature is supported.
*/
if ((no & 0xffff) <= 0x7ff)
return -ENOSYS;
break;
} }
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
info.si_addr = (void __user *)instruction_pointer(regs) -
(compat_thumb_mode(regs) ? 2 : 4);
arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no);
return 0;
} }
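 One detail of the new SIGILL path above may be worth spelling out: by the time the exception is taken, the PC has already advanced past the trapping SVC, so si_addr backs up by one instruction width (2 bytes in Thumb mode, 4 in ARM mode) to point at the offending instruction. An illustrative restatement, with insn_size a hypothetical local:

	insn_size = compat_thumb_mode(regs) ? 2 : 4;
	info.si_addr = (void __user *)(instruction_pointer(regs) - insn_size);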
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/bug.h> #include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h> #include <asm/daifflags.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/esr.h> #include <asm/esr.h>
...@@ -223,13 +224,46 @@ void die(const char *str, struct pt_regs *regs, int err) ...@@ -223,13 +224,46 @@ void die(const char *str, struct pt_regs *regs, int err)
do_exit(SIGSEGV); do_exit(SIGSEGV);
} }
static bool show_unhandled_signals_ratelimited(void)
{
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
return show_unhandled_signals && __ratelimit(&rs);
}
void arm64_force_sig_info(struct siginfo *info, const char *str,
struct task_struct *tsk)
{
unsigned int esr = tsk->thread.fault_code;
struct pt_regs *regs = task_pt_regs(tsk);
if (!unhandled_signal(tsk, info->si_signo))
goto send_sig;
if (!show_unhandled_signals_ratelimited())
goto send_sig;
pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
if (esr)
pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);
pr_cont("%s", str);
print_vma_addr(KERN_CONT " in ", regs->pc);
pr_cont("\n");
__show_regs(regs);
send_sig:
force_sig_info(info->si_signo, info, tsk);
}
void arm64_notify_die(const char *str, struct pt_regs *regs, void arm64_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, int err) struct siginfo *info, int err)
{ {
if (user_mode(regs)) { if (user_mode(regs)) {
WARN_ON(regs != current_pt_regs());
current->thread.fault_address = 0; current->thread.fault_address = 0;
current->thread.fault_code = err; current->thread.fault_code = err;
force_sig_info(info->si_signo, info, current); arm64_force_sig_info(info, str, current);
} else { } else {
die(str, regs, err); die(str, regs, err);
} }
...@@ -311,12 +345,13 @@ static int call_undef_hook(struct pt_regs *regs) ...@@ -311,12 +345,13 @@ static int call_undef_hook(struct pt_regs *regs)
return fn ? fn(regs, instr) : 1; return fn ? fn(regs, instr) : 1;
} }
void force_signal_inject(int signal, int code, struct pt_regs *regs, void force_signal_inject(int signal, int code, unsigned long address)
unsigned long address)
{ {
siginfo_t info; siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
const char *desc; const char *desc;
struct pt_regs *regs = current_pt_regs();
clear_siginfo(&info);
switch (signal) { switch (signal) {
case SIGILL: case SIGILL:
...@@ -330,17 +365,16 @@ void force_signal_inject(int signal, int code, struct pt_regs *regs, ...@@ -330,17 +365,16 @@ void force_signal_inject(int signal, int code, struct pt_regs *regs,
break; break;
} }
if (unhandled_signal(current, signal) && /* Force signals we don't understand to SIGKILL */
 show_unhandled_signals_ratelimited()) { if (WARN_ON(signal != SIGKILL &&
pr_info("%s[%d]: %s: pc=%p\n", siginfo_layout(signal, code) != SIL_FAULT)) {
current->comm, task_pid_nr(current), desc, pc); signal = SIGKILL;
dump_instr(KERN_INFO, regs);
} }
info.si_signo = signal; info.si_signo = signal;
info.si_errno = 0; info.si_errno = 0;
info.si_code = code; info.si_code = code;
info.si_addr = pc; info.si_addr = (void __user *)address;
arm64_notify_die(desc, regs, &info, 0); arm64_notify_die(desc, regs, &info, 0);
} }
...@@ -348,7 +382,7 @@ void force_signal_inject(int signal, int code, struct pt_regs *regs, ...@@ -348,7 +382,7 @@ void force_signal_inject(int signal, int code, struct pt_regs *regs,
/* /*
* Set up process info to signal segmentation fault - called on access error. * Set up process info to signal segmentation fault - called on access error.
*/ */
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr) void arm64_notify_segfault(unsigned long addr)
{ {
int code; int code;
...@@ -359,7 +393,7 @@ void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr) ...@@ -359,7 +393,7 @@ void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr)
code = SEGV_ACCERR; code = SEGV_ACCERR;
up_read(&current->mm->mmap_sem); up_read(&current->mm->mmap_sem);
force_signal_inject(SIGSEGV, code, regs, addr); force_signal_inject(SIGSEGV, code, addr);
} }
asmlinkage void __exception do_undefinstr(struct pt_regs *regs) asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
...@@ -371,13 +405,12 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) ...@@ -371,13 +405,12 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0) if (call_undef_hook(regs) == 0)
return; return;
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
} }
int cpu_enable_cache_maint_trap(void *__unused) void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{ {
config_sctlr_el1(SCTLR_EL1_UCI, 0); config_sctlr_el1(SCTLR_EL1_UCI, 0);
return 0;
} }
#define __user_cache_maint(insn, address, res) \ #define __user_cache_maint(insn, address, res) \
...@@ -426,12 +459,12 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) ...@@ -426,12 +459,12 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
__user_cache_maint("ic ivau", address, ret); __user_cache_maint("ic ivau", address, ret);
break; break;
default: default:
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
return; return;
} }
if (ret) if (ret)
arm64_notify_segfault(regs, address); arm64_notify_segfault(address);
else else
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
} }
...@@ -600,11 +633,6 @@ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) ...@@ -600,11 +633,6 @@ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{ {
siginfo_t info; siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs); void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
smp_processor_id(), esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL; info.si_signo = SIGILL;
info.si_errno = 0; info.si_errno = 0;
...@@ -612,9 +640,9 @@ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) ...@@ -612,9 +640,9 @@ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
info.si_addr = pc; info.si_addr = pc;
current->thread.fault_address = 0; current->thread.fault_address = 0;
current->thread.fault_code = 0; current->thread.fault_code = esr;
force_sig_info(info.si_signo, &info, current); arm64_force_sig_info(&info, "Bad EL0 synchronous exception", current);
} }
#ifdef CONFIG_VMAP_STACK #ifdef CONFIG_VMAP_STACK
......
...@@ -17,6 +17,7 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \ ...@@ -17,6 +17,7 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
-ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \ -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \
-fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \ -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
-fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
-fcall-saved-x18 -fcall-saved-x18 -fomit-frame-pointer
CFLAGS_REMOVE_atomic_ll_sc.o := -pg
lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
...@@ -50,6 +50,10 @@ ENTRY(flush_icache_range) ...@@ -50,6 +50,10 @@ ENTRY(flush_icache_range)
*/ */
ENTRY(__flush_cache_user_range) ENTRY(__flush_cache_user_range)
uaccess_ttbr0_enable x2, x3, x4 uaccess_ttbr0_enable x2, x3, x4
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
b 7f
alternative_else_nop_endif
dcache_line_size x2, x3 dcache_line_size x2, x3
sub x3, x2, #1 sub x3, x2, #1
bic x4, x0, x3 bic x4, x0, x3
...@@ -60,8 +64,13 @@ user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE ...@@ -60,8 +64,13 @@ user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
b.lo 1b b.lo 1b
dsb ish dsb ish
7:
alternative_if ARM64_HAS_CACHE_DIC
isb
b 8f
alternative_else_nop_endif
invalidate_icache_by_line x0, x1, x2, x3, 9f invalidate_icache_by_line x0, x1, x2, x3, 9f
mov x0, #0 8: mov x0, #0
1: 1:
uaccess_ttbr0_disable x1, x2 uaccess_ttbr0_disable x1, x2
ret ret
...@@ -80,6 +89,12 @@ ENDPROC(__flush_cache_user_range) ...@@ -80,6 +89,12 @@ ENDPROC(__flush_cache_user_range)
* - end - virtual end address of region * - end - virtual end address of region
*/ */
ENTRY(invalidate_icache_range) ENTRY(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
mov x0, xzr
isb
ret
alternative_else_nop_endif
uaccess_ttbr0_enable x2, x3, x4 uaccess_ttbr0_enable x2, x3, x4
invalidate_icache_by_line x0, x1, x2, x3, 2f invalidate_icache_by_line x0, x1, x2, x3, 2f
...@@ -116,6 +131,10 @@ ENDPIPROC(__flush_dcache_area) ...@@ -116,6 +131,10 @@ ENDPIPROC(__flush_dcache_area)
* - size - size in question * - size - size in question
*/ */
ENTRY(__clean_dcache_area_pou) ENTRY(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
ret
alternative_else_nop_endif
dcache_by_line_op cvau, ish, x0, x1, x2, x3 dcache_by_line_op cvau, ish, x0, x1, x2, x3
ret ret
ENDPROC(__clean_dcache_area_pou) ENDPROC(__clean_dcache_area_pou)
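 The ARM64_HAS_CACHE_IDC / ARM64_HAS_CACHE_DIC alternatives added above elide the corresponding maintenance when the CPU advertises coherent caches via CTR_EL0.IDC / CTR_EL0.DIC. In rough C-like pseudocode, with all helper names hypothetical, the intent is:

	/* sketch: make newly written instructions visible to the I-side */
	if (!cpu_has_idc())			/* CTR_EL0.IDC == 0 */
		clean_dcache_to_pou(start, end);	/* dc cvau loop  */
	else
		dsb_ishst();				/* ordering only */

	if (!cpu_has_dic())			/* CTR_EL0.DIC == 0 */
		invalidate_icache(start, end);		/* ic ivau loop  */
	else
		isb();					/* resync fetch  */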
......
...@@ -43,6 +43,7 @@ ...@@ -43,6 +43,7 @@
#include <asm/system_misc.h> #include <asm/system_misc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/traps.h>
#include <acpi/ghes.h> #include <acpi/ghes.h>
...@@ -289,58 +290,31 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, ...@@ -289,58 +290,31 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
do_exit(SIGKILL); do_exit(SIGKILL);
} }
static void __do_user_fault(struct task_struct *tsk, unsigned long addr, static void __do_user_fault(struct siginfo *info, unsigned int esr)
unsigned int esr, unsigned int sig, int code,
struct pt_regs *regs, int fault)
{ {
struct siginfo si; current->thread.fault_address = (unsigned long)info->si_addr;
const struct fault_info *inf; current->thread.fault_code = esr;
unsigned int lsb = 0; arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current);
if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
inf = esr_to_fault_info(esr);
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x",
tsk->comm, task_pid_nr(tsk), inf->name, sig,
addr, esr);
print_vma_addr(KERN_CONT ", in ", regs->pc);
pr_cont("\n");
__show_regs(regs);
}
tsk->thread.fault_address = addr;
tsk->thread.fault_code = esr;
si.si_signo = sig;
si.si_errno = 0;
si.si_code = code;
si.si_addr = (void __user *)addr;
/*
* Either small page or large page may be poisoned.
* In other words, VM_FAULT_HWPOISON_LARGE and
* VM_FAULT_HWPOISON are mutually exclusive.
*/
if (fault & VM_FAULT_HWPOISON_LARGE)
lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
else if (fault & VM_FAULT_HWPOISON)
lsb = PAGE_SHIFT;
si.si_addr_lsb = lsb;
force_sig_info(sig, &si, tsk);
} }
static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{ {
struct task_struct *tsk = current;
const struct fault_info *inf;
/* /*
* If we are in kernel mode at this point, we have no context to * If we are in kernel mode at this point, we have no context to
* handle this fault with. * handle this fault with.
*/ */
if (user_mode(regs)) { if (user_mode(regs)) {
inf = esr_to_fault_info(esr); const struct fault_info *inf = esr_to_fault_info(esr);
__do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs, 0); struct siginfo si = {
} else .si_signo = inf->sig,
.si_code = inf->code,
.si_addr = (void __user *)addr,
};
__do_user_fault(&si, esr);
} else {
__do_kernel_fault(addr, esr, regs); __do_kernel_fault(addr, esr, regs);
}
} }
#define VM_FAULT_BADMAP 0x010000 #define VM_FAULT_BADMAP 0x010000
...@@ -393,7 +367,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, ...@@ -393,7 +367,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
{ {
struct task_struct *tsk; struct task_struct *tsk;
struct mm_struct *mm; struct mm_struct *mm;
int fault, sig, code, major = 0; struct siginfo si;
int fault, major = 0;
unsigned long vm_flags = VM_READ | VM_WRITE; unsigned long vm_flags = VM_READ | VM_WRITE;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -525,27 +500,37 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		return 0;
 	}

+	clear_siginfo(&si);
+	si.si_addr = (void __user *)addr;
 	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to successfully fix up
 		 * this page fault.
 		 */
-		sig = SIGBUS;
-		code = BUS_ADRERR;
-	} else if (fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
-		sig = SIGBUS;
-		code = BUS_MCEERR_AR;
+		si.si_signo = SIGBUS;
+		si.si_code = BUS_ADRERR;
+	} else if (fault & VM_FAULT_HWPOISON_LARGE) {
+		unsigned int hindex = VM_FAULT_GET_HINDEX(fault);
+		si.si_signo = SIGBUS;
+		si.si_code = BUS_MCEERR_AR;
+		si.si_addr_lsb = hstate_index_to_shift(hindex);
+	} else if (fault & VM_FAULT_HWPOISON) {
+		si.si_signo = SIGBUS;
+		si.si_code = BUS_MCEERR_AR;
+		si.si_addr_lsb = PAGE_SHIFT;
 	} else {
 		/*
 		 * Something tried to access memory that isn't in our memory
 		 * map.
 		 */
-		sig = SIGSEGV;
-		code = fault == VM_FAULT_BADACCESS ?
+		si.si_signo = SIGSEGV;
+		si.si_code = fault == VM_FAULT_BADACCESS ?
 			SEGV_ACCERR : SEGV_MAPERR;
 	}
-	__do_user_fault(tsk, addr, esr, sig, code, regs, fault);
+	__do_user_fault(&si, esr);
 	return 0;

 no_context:

@@ -582,8 +567,6 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 	const struct fault_info *inf;

 	inf = esr_to_fault_info(esr);
-	pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n",
-		inf->name, esr, addr);

 	/*
 	 * Synchronous aborts may interrupt code which had interrupts masked.

@@ -600,83 +583,83 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 		nmi_exit();
 	}

-	info.si_signo = SIGBUS;
+	info.si_signo = inf->sig;
 	info.si_errno = 0;
-	info.si_code = BUS_FIXME;
+	info.si_code = inf->code;
 	if (esr & ESR_ELx_FnV)
 		info.si_addr = NULL;
 	else
 		info.si_addr = (void __user *)addr;
-	arm64_notify_die("", regs, &info, esr);
+	arm64_notify_die(inf->name, regs, &info, esr);
 	return 0;
 }
 static const struct fault_info fault_info[] = {
-	{ do_bad, SIGBUS, BUS_FIXME, "ttbr address size fault" },
+	{ do_bad, SIGKILL, SI_KERNEL, "ttbr address size fault" },
-	{ do_bad, SIGBUS, BUS_FIXME, "level 1 address size fault" },
+	{ do_bad, SIGKILL, SI_KERNEL, "level 1 address size fault" },
-	{ do_bad, SIGBUS, BUS_FIXME, "level 2 address size fault" },
+	{ do_bad, SIGKILL, SI_KERNEL, "level 2 address size fault" },
-	{ do_bad, SIGBUS, BUS_FIXME, "level 3 address size fault" },
+	{ do_bad, SIGKILL, SI_KERNEL, "level 3 address size fault" },
 	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
 	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
 	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
 	{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 8" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 8" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 12" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 12" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
 	{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
-	{ do_sea, SIGBUS, BUS_FIXME, "synchronous external abort" },
+	{ do_sea, SIGBUS, BUS_OBJERR, "synchronous external abort" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 17" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 17" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 18" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 18" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 19" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 19" },
-	{ do_sea, SIGBUS, BUS_FIXME, "level 0 (translation table walk)" },
+	{ do_sea, SIGKILL, SI_KERNEL, "level 0 (translation table walk)" },
-	{ do_sea, SIGBUS, BUS_FIXME, "level 1 (translation table walk)" },
+	{ do_sea, SIGKILL, SI_KERNEL, "level 1 (translation table walk)" },
-	{ do_sea, SIGBUS, BUS_FIXME, "level 2 (translation table walk)" },
+	{ do_sea, SIGKILL, SI_KERNEL, "level 2 (translation table walk)" },
-	{ do_sea, SIGBUS, BUS_FIXME, "level 3 (translation table walk)" },
+	{ do_sea, SIGKILL, SI_KERNEL, "level 3 (translation table walk)" },
-	{ do_sea, SIGBUS, BUS_FIXME, "synchronous parity or ECC error" },	// Reserved when RAS is implemented
+	{ do_sea, SIGBUS, BUS_OBJERR, "synchronous parity or ECC error" },	// Reserved when RAS is implemented
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 25" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 25" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 26" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 26" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 27" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 27" },
-	{ do_sea, SIGBUS, BUS_FIXME, "level 0 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
+	{ do_sea, SIGKILL, SI_KERNEL, "level 0 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
-	{ do_sea, SIGBUS, BUS_FIXME, "level 1 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
+	{ do_sea, SIGKILL, SI_KERNEL, "level 1 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
-	{ do_sea, SIGBUS, BUS_FIXME, "level 2 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
+	{ do_sea, SIGKILL, SI_KERNEL, "level 2 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
-	{ do_sea, SIGBUS, BUS_FIXME, "level 3 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
+	{ do_sea, SIGKILL, SI_KERNEL, "level 3 synchronous parity error (translation table walk)" },	// Reserved when RAS is implemented
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 32" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 32" },
 	{ do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 34" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 34" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 35" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 35" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 36" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 36" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 37" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 37" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 38" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 38" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 39" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 39" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 40" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 40" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 41" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 41" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 42" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 42" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 43" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 43" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 44" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 44" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 45" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 45" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 46" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 46" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 47" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 47" },
-	{ do_bad, SIGBUS, BUS_FIXME, "TLB conflict abort" },
+	{ do_bad, SIGKILL, SI_KERNEL, "TLB conflict abort" },
-	{ do_bad, SIGBUS, BUS_FIXME, "Unsupported atomic hardware update fault" },
+	{ do_bad, SIGKILL, SI_KERNEL, "Unsupported atomic hardware update fault" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 50" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 50" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 51" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 51" },
-	{ do_bad, SIGBUS, BUS_FIXME, "implementation fault (lockdown abort)" },
+	{ do_bad, SIGKILL, SI_KERNEL, "implementation fault (lockdown abort)" },
-	{ do_bad, SIGBUS, BUS_FIXME, "implementation fault (unsupported exclusive)" },
+	{ do_bad, SIGBUS, BUS_OBJERR, "implementation fault (unsupported exclusive)" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 54" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 54" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 55" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 55" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 56" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 56" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 57" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 57" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 58" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 58" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 59" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 59" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 60" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 60" },
-	{ do_bad, SIGBUS, BUS_FIXME, "section domain fault" },
+	{ do_bad, SIGKILL, SI_KERNEL, "section domain fault" },
-	{ do_bad, SIGBUS, BUS_FIXME, "page domain fault" },
+	{ do_bad, SIGKILL, SI_KERNEL, "page domain fault" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 63" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 63" },
 };
 int handle_guest_sea(phys_addr_t addr, unsigned int esr)

@@ -698,19 +681,17 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
 	if (!inf->fn(addr, esr, regs))
 		return;

-	pr_alert("Unhandled fault: %s at 0x%016lx\n",
-		 inf->name, addr);
-	mem_abort_decode(esr);
-	if (!user_mode(regs))
+	if (!user_mode(regs)) {
+		pr_alert("Unhandled fault at 0x%016lx\n", addr);
+		mem_abort_decode(esr);
 		show_pte(addr);
+	}

 	info.si_signo = inf->sig;
 	info.si_errno = 0;
 	info.si_code = inf->code;
 	info.si_addr = (void __user *)addr;
-	arm64_notify_die("", regs, &info, esr);
+	arm64_notify_die(inf->name, regs, &info, esr);
 }
 asmlinkage void __exception do_el0_irq_bp_hardening(void)

@@ -741,7 +722,6 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
 			  struct pt_regs *regs)
 {
 	struct siginfo info;
-	struct task_struct *tsk = current;

 	if (user_mode(regs)) {
 		if (instruction_pointer(regs) > TASK_SIZE)

@@ -749,17 +729,11 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
 		local_irq_enable();
 	}

-	if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
-		pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
-				    tsk->comm, task_pid_nr(tsk),
-				    esr_get_class_string(esr), (void *)regs->pc,
-				    (void *)regs->sp);

 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
 	info.si_code = BUS_ADRALN;
 	info.si_addr = (void __user *)addr;
-	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
+	arm64_notify_die("SP/PC alignment exception", regs, &info, esr);
 }
 int __init early_brk64(unsigned long addr, unsigned int esr,

@@ -774,11 +748,11 @@ static struct fault_info __refdata debug_fault_info[] = {
 	{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
 	{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
 	{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 3" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 3" },
 	{ do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
-	{ do_bad, SIGTRAP, TRAP_FIXME, "aarch32 vector catch" },
+	{ do_bad, SIGKILL, SI_KERNEL, "aarch32 vector catch" },
 	{ early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
-	{ do_bad, SIGBUS, BUS_FIXME, "unknown 7" },
+	{ do_bad, SIGKILL, SI_KERNEL, "unknown 7" },
 };
 void __init hook_debug_fault_code(int nr,

@@ -814,14 +788,11 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 	if (!inf->fn(addr, esr, regs)) {
 		rv = 1;
 	} else {
-		pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
-			 inf->name, esr, addr);

 		info.si_signo = inf->sig;
 		info.si_errno = 0;
 		info.si_code = inf->code;
 		info.si_addr = (void __user *)addr;
-		arm64_notify_die("", regs, &info, 0);
+		arm64_notify_die(inf->name, regs, &info, esr);
 		rv = 0;
 	}
@@ -833,7 +804,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 NOKPROBE_SYMBOL(do_debug_exception);

 #ifdef CONFIG_ARM64_PAN
-int cpu_enable_pan(void *__unused)
+void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
 {
 	/*
 	 * We modify PSTATE. This won't work from irq context as the PSTATE

@@ -843,6 +814,5 @@ int cpu_enable_pan(void *__unused)
 	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
 	asm(SET_PSTATE_PAN(1));
-	return 0;
 }
 #endif /* CONFIG_ARM64_PAN */
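
For readers following the siginfo rework in the fault-handling hunks above: a hwpoison page fault now reaches userspace as SIGBUS with si_code set to BUS_MCEERR_AR and si_addr_lsb describing the poisoned granule. The sketch below is a hypothetical userspace consumer of that interface, not part of this commit; it assumes a libc that exposes BUS_MCEERR_AR and the si_addr_lsb field, and it uses fprintf() purely for brevity even though that is not async-signal-safe.

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Minimal SIGBUS handler distinguishing a memory error from a plain bus error. */
	static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
	{
		if (si->si_code == BUS_MCEERR_AR)
			/* Hardware memory error: si_addr_lsb is log2 of the poisoned extent. */
			fprintf(stderr, "memory error at %p, lsb %d\n",
				si->si_addr, (int)si->si_addr_lsb);
		else
			fprintf(stderr, "SIGBUS (code %d) at %p\n",
				si->si_code, si->si_addr);
		_exit(1);
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = sigbus_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGBUS, &sa, NULL);

		/* ... touch memory that the kernel may have marked as poisoned ... */
		pause();
		return 0;
	}
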
@@ -36,6 +36,12 @@
 #define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
 #endif

+#ifdef CONFIG_RANDOMIZE_BASE
+#define TCR_KASLR_FLAGS	TCR_NFD1
+#else
+#define TCR_KASLR_FLAGS	0
+#endif

 #define TCR_SMP_FLAGS	TCR_SHARED

 /* PTWs cacheable, inner/outer WBWA */

@@ -432,7 +438,8 @@ ENTRY(__cpu_setup)
 	 * both user and kernel.
 	 */
 	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
-			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
+			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
+			TCR_TBI0 | TCR_A1
 	tcr_set_idmap_t0sz	x10, x9

 	/*

@@ -441,16 +448,15 @@ ENTRY(__cpu_setup)
 	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
 #ifdef CONFIG_ARM64_HW_AFDBM
 	/*
-	 * Hardware update of the Access and Dirty bits.
+	 * Enable hardware update of the Access Flags bit.
+	 * Hardware dirty bit management is enabled later,
+	 * via capabilities.
 	 */
 	mrs	x9, ID_AA64MMFR1_EL1
 	and	x9, x9, #0xf
-	cbz	x9, 2f
-	cmp	x9, #2
-	b.lt	1f
-	orr	x10, x10, #TCR_HD		// hardware Dirty flag update
-1:	orr	x10, x10, #TCR_HA		// hardware Access flag update
-2:
+	cbz	x9, 1f
+	orr	x10, x10, #TCR_HA		// hardware Access flag update
+1:
 #endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
 	ret					// return to head.S
...
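
The __cpu_setup change above stops setting TCR_EL1.HD at boot: only TCR_HA is set when the CPU reports hardware Access flag support, and hardware dirty-bit management is switched on later through the cpufeature capability machinery. Purely to illustrate that split (a hypothetical sketch, not the code added by this series), a capability enable hook could look roughly like:

	/* Hypothetical sketch: turn on hardware dirty-bit management from a
	 * cpufeature enable hook instead of unconditionally in __cpu_setup. */
	#include <linux/types.h>
	#include <asm/barrier.h>
	#include <asm/cpufeature.h>
	#include <asm/pgtable-hwdef.h>
	#include <asm/sysreg.h>

	static void example_enable_hw_dbm(const struct arm64_cpu_capabilities *cap)
	{
		u64 tcr = read_sysreg(tcr_el1) | TCR_HD;	/* hardware Dirty flag update */

		write_sysreg(tcr, tcr_el1);
		isb();						/* make the new TCR_EL1 setting visible */
	}
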
@@ -26,7 +26,7 @@ static inline void signal_compat_build_tests(void)
 	 * new fields are handled in copy_siginfo_to_user32()!
 	 */
 	BUILD_BUG_ON(NSIGILL != 11);
-	BUILD_BUG_ON(NSIGFPE != 13);
+	BUILD_BUG_ON(NSIGFPE != 14);
 	BUILD_BUG_ON(NSIGSEGV != 7);
 	BUILD_BUG_ON(NSIGBUS != 5);
 	BUILD_BUG_ON(NSIGTRAP != 4);
...
@@ -31,11 +31,6 @@
 #define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \
 				(1 << ACPI_IORT_NODE_SMMU_V3))

-/* Until ACPICA headers cover IORT rev. C */
-#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
-#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2
-#endif

 struct iort_its_msi_chip {
 	struct list_head	list;
 	struct fwnode_handle	*fw_node;

@@ -366,7 +361,6 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
 	return NULL;
 }

-#if (ACPI_CA_VERSION > 0x20170929)
 static int iort_get_id_mapping_index(struct acpi_iort_node *node)
 {
 	struct acpi_iort_smmu_v3 *smmu;

@@ -400,12 +394,6 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node)
 		return -EINVAL;
 	}
 }
-#else
-static inline int iort_get_id_mapping_index(struct acpi_iort_node *node)
-{
-	return -EINVAL;
-}
-#endif

 static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
 					       u32 id_in, u32 *id_out,
...
@@ -122,7 +122,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
 		return pmu_parse_percpu_irq(pmu, irq);
 	}

-	if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
+	if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) {
 		pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
 			pdev->dev.of_node);
 	}
...
@@ -23,16 +23,30 @@
 #define DRVNAME		PMUNAME "_pmu"
 #define pr_fmt(fmt)	DRVNAME ": " fmt

+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/capability.h>
 #include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/perf_event.h>
 #include <linux/platform_device.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/vmalloc.h>

+#include <asm/barrier.h>
+#include <asm/cpufeature.h>
+#include <asm/mmu.h>
 #include <asm/sysreg.h>

 #define ARM_SPE_BUF_PAD_BYTE	0
...
@@ -599,7 +599,6 @@
 	IRQCHIP_OF_MATCH_TABLE()	\
 	ACPI_PROBE_TABLE(irqchip)	\
 	ACPI_PROBE_TABLE(timer)		\
-	ACPI_PROBE_TABLE(iort)		\
 	EARLYCON_TABLE()

 #define INIT_TEXT	\
...
@@ -8,6 +8,8 @@
 #ifndef __LINUX_SIZES_H__
 #define __LINUX_SIZES_H__

+#include <linux/const.h>

 #define SZ_1		0x00000001
 #define SZ_2		0x00000002
 #define SZ_4		0x00000004

@@ -44,4 +46,6 @@
 #define SZ_1G		0x40000000
 #define SZ_2G		0x80000000

+#define SZ_4G		_AC(0x100000000, ULL)

 #endif /* __LINUX_SIZES_H__ */
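
The new SZ_4G entry is why <linux/const.h> is pulled in: 0x100000000 does not fit in 32 bits, so the constant needs a ULL suffix when compiled as C, but the header is also included from assembly, where a suffix would be rejected. _AC() papers over that difference. A standalone sketch of the idea (mirroring the uapi const.h pattern, shown for illustration rather than copied verbatim):

	/* Sketch of the _AC() trick: paste a type suffix in C, drop it in assembly. */
	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X		/* assembler: bare constant */
	#else
	#define __AC(X, Y)	(X##Y)		/* C: glue the suffix onto the literal */
	#define _AC(X, Y)	__AC(X, Y)
	#endif

	#define SZ_4G		_AC(0x100000000, ULL)
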
@@ -207,7 +207,8 @@ typedef struct siginfo {
 #define __FPE_DECERR	11	/* packed decimal error */
 #define __FPE_INVASC	12	/* invalid ASCII digit */
 #define __FPE_INVDEC	13	/* invalid decimal digit */
-#define NSIGFPE		13
+#define FPE_FLTUNK	14	/* undiagnosed floating-point exception */
+#define NSIGFPE		14

 /*
  * SIGSEGV si_codes
...
@@ -2843,10 +2843,6 @@ enum siginfo_layout siginfo_layout(int sig, int si_code)
 #ifdef FPE_FIXME
 		if ((sig == SIGFPE) && (si_code == FPE_FIXME))
 			layout = SIL_FAULT;
-#endif
-#ifdef BUS_FIXME
-		if ((sig == SIGBUS) && (si_code == BUS_FIXME))
-			layout = SIL_FAULT;
 #endif
 	}
 	return layout;
...
@@ -221,6 +221,7 @@ static int symbol_valid(struct sym_entry *s)
 	static char *special_prefixes[] = {
 		"__crc_",		/* modversions */
+		"__efistub_",		/* arm64 EFI stub namespace */
 		NULL };

 	static char *special_suffixes[] = {
...